Upload folder using huggingface_hub
- .gitattributes +1 -0
- README.md +267 -0
- bias.md +13 -0
- chat_template.jinja +179 -0
- config.json +357 -0
- configuration.py +57 -0
- configuration_nemotron_h.py +245 -0
- configuration_radio.py +152 -0
- evs.py +73 -0
- explainability.md +15 -0
- generation_config.json +11 -0
- hf_quant_config.json +17 -0
- image_processing.py +148 -0
- llama_nemotron_toolcall_parser_no_streaming.py +470 -0
- model-00001-of-00004.safetensors +3 -0
- model-00002-of-00004.safetensors +3 -0
- model-00003-of-00004.safetensors +3 -0
- model-00004-of-00004.safetensors +3 -0
- model.safetensors.index.json +0 -0
- modeling.py +287 -0
- modeling_nemotron_h.py +1636 -0
- nano_v2_inference_chat_template.jinja +125 -0
- nano_v2_llm_template.jinja +1 -0
- non_reasoning_nano_v2_inference_chat_template.jinja +118 -0
- preprocessor_config.json +15 -0
- privacy.md +13 -0
- processing.py +261 -0
- processing_utils.py +83 -0
- safety.md +10 -0
- special_tokens_map.json +23 -0
- tokenizer.json +3 -0
- tokenizer_config.json +0 -0
- video_io.py +176 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,267 @@
---
license: other
license_name: nvidia-open-model-license
license_link: >-
  https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license/
pipeline_tag: image-text-to-text
library_name: transformers
tags:
- nvidia
- VLM
- FP8
---

# NVIDIA-Nemotron-Nano-VL-12B-V2-FP8

## Model Overview

### Description

NVIDIA-Nemotron-Nano-VL-12B-V2-FP8 is the quantized version of the NVIDIA Nemotron Nano VL V2 model, an auto-regressive vision language model that uses an optimized transformer architecture. For more information, please check [here](https://huggingface.co/nvidia/Nemotron-Nano-12B-v2-VL-BF16). This FP8 model is quantized with [TensorRT Model Optimizer](https://github.com/NVIDIA/TensorRT-Model-Optimizer).
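For reference, the sketch below shows a generic post-training FP8 flow with TensorRT Model Optimizer's `modelopt.torch.quantization` API. It is an illustration of the tooling only; the calibration data, ignored modules, and exact configuration used to produce this checkpoint are not documented here, and the helper names are placeholders.

```python
# Generic FP8 post-training quantization sketch with TensorRT Model Optimizer.
# Illustrative only: the calibration batches are placeholders, and this is not
# the exact recipe used to produce this checkpoint.
import modelopt.torch.quantization as mtq


def quantize_to_fp8(model, calibration_batches):
    """Return `model` quantized to FP8 after calibrating on a few batches."""

    def forward_loop(m):
        # Run representative inputs through the model so static activation
        # scales can be collected before the weights are quantized.
        for batch in calibration_batches:
            m(**batch)

    # FP8_DEFAULT_CFG quantizes Linear layers to FP8 weights and activations,
    # consistent with the "quant_algo": "FP8" entry in this repository's config.
    return mtq.quantize(model, mtq.FP8_DEFAULT_CFG, forward_loop=forward_loop)
```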

This model was trained on commercial images for all three stages of training and supports single-image inference.

### License/Terms of Use
**Governing Terms:**

Your use of the model is governed by the [NVIDIA Open Model License Agreement](https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license/).

**Additional Information:**

Backbone LLM: NVIDIA-Nemotron-Nano-12B-v2.

### Deployment Geography:

Global

### Use Case:

Customers: AI foundry enterprise customers

Use Cases: Image summarization, text-image analysis, optical character recognition, interactive Q&A on images, and text chain-of-thought reasoning

## Release Date:

- Build.Nvidia.com [October 28th, 2025] via [nvidia/NVIDIA-Nemotron-Nano-VL-12B-V2](https://build.nvidia.com/nvidia/nvidia-nemotron-nano-vl-12b-v2)
- Hugging Face [October 28th, 2025] via [nvidia/NVIDIA-Nemotron-Nano-VL-12B-V2-BF16](https://huggingface.co/nvidia/Nemotron-Nano-12B-v2-VL-BF16)
- Hugging Face [October 28th, 2025] via [nvidia/NVIDIA-Nemotron-Nano-VL-12B-V2-FP8](https://huggingface.co/nvidia/NVIDIA-Nemotron-Nano-12B-v2-VL-FP8)
- Hugging Face [October 28th, 2025] via [nvidia/NVIDIA-Nemotron-Nano-VL-12B-V2-NVFP4](https://huggingface.co/nvidia/NVIDIA-Nemotron-Nano-12B-v2-VL-NVFP4-QAD)

## Model Architecture:

**Network Type:** Transformer

**Network Architecture:**

Vision Encoder: [C-RADIOv2-H](https://huggingface.co/nvidia/C-RADIOv2-VLM-H)

Language Encoder: NVIDIA-Nemotron-Nano-12B-v2

### Input

Input Type(s): Image, Text
- Input Images
- Languages Supported: German, Spanish, French, Italian, Korean, Portuguese, Russian, Japanese, Chinese, English

Input Format(s): Image (Red, Green, Blue (RGB)) and Text (String)

Input Parameters: Image (2D), Text (1D)

Other Properties Related to Input:

- Context length up to 128K
- Maximum Resolution: Determined by a 12-tile layout constraint, with each tile being 512 × 512 pixels. This supports aspect ratios such as the following (a tiling sketch follows this list):
  - 4 × 3 layout: up to 2048 × 1536 pixels
  - 3 × 4 layout: up to 1536 × 2048 pixels
  - 2 × 6 layout: up to 1024 × 3072 pixels
  - 6 × 2 layout: up to 3072 × 1024 pixels
  - Other configurations are allowed, provided the total number of tiles is ≤ 12
- Channel Count: 3 channels (RGB)
- Alpha Channel: Not supported (no transparency)
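To make the tile constraint concrete, the sketch below picks a grid of 512 × 512 tiles (at most 12 in total) whose aspect ratio best matches the input image. This is an illustrative approximation of the rule described above, not the model's preprocessing code; the shipped `image_processing.py` is authoritative.

```python
# Illustrative only: choose a tile grid (at most 12 tiles of 512 x 512) whose
# aspect ratio best matches the input image. Ties are broken by preferring more
# tiles, i.e. higher resolution. See image_processing.py for the real logic.
MAX_TILES = 12
TILE = 512


def best_tile_layout(width: int, height: int) -> tuple[int, int]:
    """Return (cols, rows) of 512x512 tiles closest to the image aspect ratio."""
    target = width / height
    candidates = [
        (cols, rows)
        for cols in range(1, MAX_TILES + 1)
        for rows in range(1, MAX_TILES + 1)
        if cols * rows <= MAX_TILES
    ]
    return min(
        candidates,
        key=lambda cr: (abs(cr[0] / cr[1] - target), -(cr[0] * cr[1])),
    )


print(best_tile_layout(2048, 1536))  # -> (4, 3), i.e. up to 2048 x 1536 pixels
print(best_tile_layout(1024, 3072))  # -> (2, 6), i.e. up to 1024 x 3072 pixels
```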

### Output
Output Type(s): Text

Output Formats: String

Output Parameters: One-Dimensional (1D): Sequences up to 128K

Our AI models are designed and/or optimized to run on NVIDIA GPU-accelerated systems. By leveraging NVIDIA's hardware (e.g., GPU cores) and software frameworks (e.g., CUDA libraries), the model achieves faster training and inference times compared to CPU-only solutions.

### Software Integration
Runtime Engine(s): vLLM<br>
Supported Hardware Microarchitecture Compatibility: H100 SXM 80GB<br>
Supported Operating System(s): Linux<br>

### Model Versions:
Nemotron-Nano-VL-12B-V2-FP8

## Quick Start

### Install Dependencies
```sh
pip install causal_conv1d "transformers>4.53,<4.54" torch timm "mamba-ssm==2.2.5" accelerate open_clip_torch numpy pillow
```

### Usage

To serve this checkpoint with [vLLM](https://github.com/vllm-project/vllm), you can start the Docker container `vllm/vllm-openai:nightly` and run the sample command below:

```sh
python3 -m vllm.entrypoints.openai.api_server --model nvidia/Nemotron-Nano-VL-12B-V2-FP8 --trust-remote-code --quantization modelopt
```
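Once the server is up, it exposes an OpenAI-compatible chat completions endpoint. Below is a minimal client sketch; the port (vLLM's default of 8000) and the example image URL are assumptions.

```python
# Minimal client sketch for the OpenAI-compatible server started above.
# Assumptions: vLLM's default port 8000 and a placeholder image URL.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="not-needed")

response = client.chat.completions.create(
    model="nvidia/Nemotron-Nano-VL-12B-V2-FP8",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe this image in one sentence."},
                {"type": "image_url", "image_url": {"url": "https://example.com/sample.jpg"}},
            ],
        }
    ],
    max_tokens=128,
)
print(response.choices[0].message.content)
```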
+
|
| 118 |
+
|
| 119 |
+
## Training, Testing, and Evaluation Datasets:
|
| 120 |
+
|
| 121 |
+
### Training Datasets:
|
| 122 |
+
|
| 123 |
+
**Data Modalities** <br>
|
| 124 |
+
** Total Size: 39'486'703 samples <br>
|
| 125 |
+
** Total Number of Datasets: 270 <br>
|
| 126 |
+
** Text-only datasets: 33 <br>
|
| 127 |
+
** Text-and-image datasets: 176 <br>
|
| 128 |
+
** Video-and-text datasets: 61 <br>
|
| 129 |
+
** Total size: 27.7 TB <br>
|
| 130 |
+
|
| 131 |
+
** Data modalities: Text, Image, Video <br>
|
| 132 |
+
** Data Collection Method by dataset: Hybrid: Automated, Human, Synthetic <br>
|
| 133 |
+
** Labeling Method by dataset: Hybrid: Automated, Human, Synthetic <br>
|
| 134 |
+
|
| 135 |
+
** Dataset partition: Training [100%], Testing [0%], Validation [0%] <br>
|
| 136 |
+
** Time period for training data collection: 2023-2025 <br>
|
| 137 |
+
** Time period for testing data collection: N/A <br>
|
| 138 |
+
** Time period for validation data collection: N/A <br>
|
| 139 |
+
|
| 140 |
+
The post-training datasets consist of a mix of internal and public datasets designed for training vision language models across various tasks. It includes:
|
| 141 |
+
|
| 142 |
+
* Public datasets sourced from publicly available images and annotations, supporting tasks like classification, captioning, visual question answering, conversation modeling, document analysis and text/image reasoning.
|
| 143 |
+
* Internal text and image datasets built with public commercial images and internal labels, adapted for the same tasks as listed above.
|
| 144 |
+
* Synthetic image datasets generated programmatically for specific tasks like tabular data understanding and optical character recognition (OCR), for English, Chinese as well as other languages.
|
| 145 |
+
* Video datasets supporting video question answering and reasoning tasks from publicly available video sources, with either publicly available or internally generated annotations.
|
| 146 |
+
* Specialized datasets for safety alignment, function calling, and domain-specific tasks (e.g., science diagrams, financial question answering).
|
| 147 |
+
* NVIDIA-Sourced Synthetic Datasets for text reasoning.
|
| 148 |
+
* Private datasets for safety alignment or VQA on invoices.
|
| 149 |
+
* Crawled or scraped captioning, VQA, and video datasets.
|
| 150 |
+
* Some datasets were improved with Qwen2.5-72B-Instruct annotations
|
| 151 |
+
|
| 152 |
+
For around ~30% of our total training corpus and several of the domains listed above, we used commercially permissive models to perform:
|
| 153 |
+
* Language translation
|
| 154 |
+
* Re-labeling of annotations for text, image and video datasets
|
| 155 |
+
* Synthetic data generation
|
| 156 |
+
* Generating chain-of-thought (CoT) traces
|
| 157 |
+
|
| 158 |
+
Additional processing for several datasets included rule-based QA generation (e.g., with templates), expanding short answers into longer responses, as well as proper reformatting. More details can be found [here](https://arxiv.org/abs/2501.14818).
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
** Image based datasets were all scanned against known CSAM to make sure no such content was included in training.<br>
|
| 162 |
+
|
| 163 |
+
# Public Datasets <br>
|
| 164 |
+
| Type | Data Type | Total Samples | Total Size (GB) |
|
| 165 |
+
|------|-----------|---------------|------------------|
|
| 166 |
+
| Function call | text | 8,000 | 0.02 |
|
| 167 |
+
| Image Captioning | image, text | 1,422,102 | 1,051.04 |
|
| 168 |
+
| Image Reasoning | image, text | 1,888,217 | 286.95 |
|
| 169 |
+
| OCR | image, text | 9,830,570 | 5,317.60 |
|
| 170 |
+
| Referring Expression Grounding | image, text | 14,694 | 2.39 |
|
| 171 |
+
| Safety | image, text | 34,187 | 9.21 |
|
| 172 |
+
| Safety | text | 57,223 | 0.52 |
|
| 173 |
+
| Safety | video, text | 12,988 | 11.78 |
|
| 174 |
+
| Text Instruction Tuning | text | 245,056 | 1.13 |
|
| 175 |
+
| Text Reasoning | text | 225,408 | 4.55 |
|
| 176 |
+
| VQA | image, text | 8,174,136 | 2,207.52 |
|
| 177 |
+
| VQA | video, text | 40,000 | 46.05 |
|
| 178 |
+
| Video Captioning | video, text | 3,289 | 6.31 |
|
| 179 |
+
| Video Reasoning | video, text | 42,620 | 49.10 |
|
| 180 |
+
| VideoQA | video, text | 1,371,923 | 17,641.79 |
|
| 181 |
+
| Visual Instruction Tuning | image, text | 1,173,877 | 167.79 |
|
| 182 |
+
| **TOTAL** | | **24,544,290** | **26,803.75** |
|
| 183 |
+
|
| 184 |
+
# Private Datasets <br>
|
| 185 |
+
| Type | Modalities | Total Samples | Total Size (GB) |
|
| 186 |
+
|------|------------|---------------|------------------|
|
| 187 |
+
| Image Reasoning | image, text | 17,729 | 15.41 |
|
| 188 |
+
| Text Reasoning | text | 445,958 | 9.01 |
|
| 189 |
+
| **TOTAL** | | **463,687** | **24.42** |
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
# Data Crawling and Scraping <br>
|
| 193 |
+
| Type | Modalities | Total Samples | Total Size (GB) |
|
| 194 |
+
|------|------------|---------------|------------------|
|
| 195 |
+
| Image Captioning | image, text | 39,870 | 10.24 |
|
| 196 |
+
| VQA | image, text | 40,348 | 3.94 |
|
| 197 |
+
| VideoQA | video, text | 288,728 | 393.30 |
|
| 198 |
+
| **TOTAL** | | **368,946** | **407.48** |
|
| 199 |
+
|
| 200 |
+
# User-Sourced Data (Collected by Provider including Prompts) <br>
|
| 201 |
+
<br>
|
| 202 |
+
|
| 203 |
+
# Self-Sourced Synthetic Data <br>
|
| 204 |
+
| Type | Data Type | Total Samples | Total Size (GB) |
|
| 205 |
+
|------|-----------|---------------|------------------|
|
| 206 |
+
| Code | text | 1,165,591 | 54.15 |
|
| 207 |
+
| OCR | image, text | 216,332 | 83.53 |
|
| 208 |
+
| Text Reasoning | text | 12,727,857 | 295.80 |
|
| 209 |
+
| **TOTAL** | | **14,109,780** | **433.48** |
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
**Properties**<br>
|
| 213 |
+
* Additionally, the dataset collection (for training and evaluation) consists of a mix of internal and public datasets designed for training and evaluation across various tasks. It includes:
|
| 214 |
+
* Internal datasets built with public commercial images and internal labels, supporting tasks like conversation modeling and document analysis.
|
| 215 |
+
* Public datasets sourced from publicly available images and annotations, adapted for tasks such as image captioning and visual question answering.
|
| 216 |
+
* Synthetic datasets generated programmatically for specific tasks like tabular data understanding.
|
| 217 |
+
* Specialized datasets for safety alignment, function calling, and domain-specific tasks (e.g., science diagrams, financial question answering).
|
| 218 |
+
|
| 219 |
+
### Evaluation Datasets:
|
| 220 |
+
The following external benchmarks are used for evaluating the model: <br>
|
| 221 |
+
|
| 222 |
+
| Dataset |
|
| 223 |
+
|---------|
|
| 224 |
+
| [AI2D Test](https://prior.allenai.org/projects/diagram-understanding ) |
|
| 225 |
+
| [ChartQA Test](https://github.com/vis-nlp/ChartQA) |
|
| 226 |
+
| [OCRBench](https://github.com/Yuliang-Liu/MultimodalOCR) |
|
| 227 |
+
| [OCRBenchV2](https://github.com/Yuliang-Liu/MultimodalOCR) English |
|
| 228 |
+
| [DocVQA Val](https://www.docvqa.org/datasets) |
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
Data Collection Method by dataset: <br>
|
| 233 |
+
* Hybrid: Human, Automated <br>
|
| 234 |
+
|
| 235 |
+
Labeling Method by dataset: <br>
|
| 236 |
+
* Hybrid: Human, Automated <br>
|
| 237 |
+
|
| 238 |
+
**Properties (Quantity, Dataset Descriptions, Sensor(s)):** N/A <br>
|
| 239 |
+
|
| 240 |
+
**Dataset License(s):** N/A <br>
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
## Evaluation Benchmarks:
|
| 245 |
+
|
| 246 |
+
| Benchmark | Score (FP8) | Score (BF16)
|
| 247 |
+
| --- | --- | --- |
|
| 248 |
+
| AI2D | 87.6% | 87.1% |
|
| 249 |
+
| OCRBenchV2 | 61.8% | 62.0% |
|
| 250 |
+
| OCRBench | 85.4% | 85.6% |
|
| 251 |
+
| ChartQA | 89.4% | 89.7% |
|
| 252 |
+
| DocVQA val | 94.3% | 94.4% |
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
# Inference:
|
| 257 |
+
**Engine:** vLLM <br>
|
| 258 |
+
**Test Hardware:** <br>
|
| 259 |
+
* 1x NVIDIA H100 SXM 80GB
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
## Ethical Considerations:
|
| 263 |
+
NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications. When downloaded or used in accordance with our terms of service, developers should work with their internal model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse. For more detailed information on ethical considerations for this model, please see the Model Card++ [Explainability](explainability.md), [Bias](bias.md), [Safety & Security](safety.md), and [Privacy](privacy.md) Subcards. Please report security vulnerabilities or NVIDIA AI Concerns [here](https://www.nvidia.com/en-us/support/submit-security-vulnerability/).
|
| 264 |
+
|
| 265 |
+
Users are responsible for model inputs and outputs. Users are responsible for ensuring safe integration of this model, including implementing guardrails as well as other safety mechanisms, prior to deployment.
|
| 266 |
+
|
| 267 |
+
Outputs generated by these models may contain political content or other potentially misleading information, issues with content security and safety, or unwanted bias that is independent of our oversight.
|
bias.md
ADDED
@@ -0,0 +1,13 @@
| Field | Response |
|:---|:---|
| Participation considerations from adversely impacted groups [protected classes](https://www.senate.ca.gov/content/protected-classes) in model design and testing: | None |
| Bias Metric (If Measured): | [BBQ Accuracy Scores in Ambiguous Contexts](https://github.com/nyu-mll/BBQ/) |
| Which characteristic (feature) show(s) the greatest difference in performance?: | The model shows high variance across many characteristics when used at a high temperature, with the greatest measurable difference seen in categories such as Gender Identity and Race x Gender. |
| Which feature(s) have the worst performance overall? | Age (ambiguous) has both the lowest category accuracy listed (0.75) and a notably negative bias score (-0.56), indicating it is the worst-performing feature overall in this evaluation. |
| Measures taken to mitigate against unwanted bias: | None |
| If using internal data, description of methods implemented in data acquisition or processing, if any, to address the prevalence of identifiable biases in the training, testing, and validation data: | The training datasets contain a large amount of synthetic data generated by LLMs. We manually curated prompts. |
| Tools used to assess statistical imbalances and highlight patterns that may introduce bias into AI models: | Bias Benchmark for Question Answering (BBQ) |
| Tools used to assess statistical imbalances and highlight patterns that may introduce bias into AI models: | The datasets, which include video datasets (e.g., YouCook2, VCG Human Dataset) and image captioning datasets, do not collectively or exhaustively represent all demographic groups (and proportionally therein). For instance, these datasets do not contain explicit mentions of demographic classes such as age, gender, or ethnicity in over 80% of samples. In the subset where analysis was performed, certain datasets contain skews in the representation of participants; for example, the share of perceived "female" participants may be significant compared to "male" participants for certain datasets. Separately, individuals aged "40 to 49 years" and "20 to 29 years" are the most frequent among the identified groups. Toxicity analysis was additionally performed on several datasets to identify potential not-safe-for-work samples and risks. To mitigate these imbalances, we recommend considering evaluation techniques such as bias audits, fine-tuning with demographically balanced datasets, and mitigation strategies like counterfactual data augmentation to align with the desired model behavior. This evaluation was conducted on a data subset ranging from 200 to 3,000 samples per dataset; as such, certain limitations may exist in the reliability of the embeddings. A baseline of 200 samples was used across all datasets, with larger subsets of up to 3,000 samples utilized for certain in-depth analyses. |
chat_template.jinja
ADDED
@@ -0,0 +1,179 @@
| 1 |
+
{%- set ns = namespace(enable_thinking=false, has_sys_prompt=false, non_tool_system_content='', has_video=false, explicit_think_requested=false) -%}
|
| 2 |
+
{%- set msg = namespace(content='') -%}
|
| 3 |
+
{%- for message in messages -%}
|
| 4 |
+
{%- if message['role'] == 'system' -%}
|
| 5 |
+
{%- set ns.has_sys_prompt = true -%}
|
| 6 |
+
{# Extract system content without tool flags #}
|
| 7 |
+
{%- if message['content'] is string -%}
|
| 8 |
+
{%- set ns.non_tool_system_content = message['content'].replace('</think>', '<_end_think>').replace('/think', '').replace('/no_think', '').replace('<_end_think>', '</think>').strip() -%}
|
| 9 |
+
{%- else -%}
|
| 10 |
+
{%- set ns.non_tool_system_content = '' -%}
|
| 11 |
+
{%- for content in message['content'] -%}
|
| 12 |
+
{%- if content['type'] == 'text' -%}
|
| 13 |
+
{%- set ns.non_tool_system_content = ns.non_tool_system_content + content['text'].replace('</think>', '<_end_think>').replace('/think', '').replace('/no_think', '').replace('<_end_think>', '</think>') -%}
|
| 14 |
+
{%- endif -%}
|
| 15 |
+
{%- endfor -%}
|
| 16 |
+
{%- set ns.non_tool_system_content = ns.non_tool_system_content.strip() -%}
|
| 17 |
+
{%- endif -%}
|
| 18 |
+
{%- endif -%}
|
| 19 |
+
{# Check for video content in all messages #}
|
| 20 |
+
{%- if message['content'] is not string -%}
|
| 21 |
+
{%- for content in message['content'] -%}
|
| 22 |
+
{%- if content['type'] == 'video' or content['type'] == 'video_url' -%}
|
| 23 |
+
{%- set ns.has_video = true -%}
|
| 24 |
+
{%- endif -%}
|
| 25 |
+
{%- endfor -%}
|
| 26 |
+
{%- endif -%}
|
| 27 |
+
{%- if message['content'] is string -%}
|
| 28 |
+
{%- if message['role'] == 'user' or message['role'] == 'system' -%}
|
| 29 |
+
{%- if '/think' in message['content'].replace('</think>', '') -%}
|
| 30 |
+
{%- set ns.enable_thinking = true -%}
|
| 31 |
+
{%- set ns.explicit_think_requested = true -%}
|
| 32 |
+
{%- elif '/no_think' in message['content'] -%}
|
| 33 |
+
{%- set ns.enable_thinking = false -%}
|
| 34 |
+
{%- endif -%}
|
| 35 |
+
{%- endif -%}
|
| 36 |
+
{%- else -%}
|
| 37 |
+
{%- for content in message['content'] -%}
|
| 38 |
+
{%- if content['type'] == 'text' -%}
|
| 39 |
+
{%- if message['role'] == 'user' or message['role'] == 'system' -%}
|
| 40 |
+
{%- if '/think' in content['text'].replace('</think>', '') -%}
|
| 41 |
+
{%- set ns.enable_thinking = true -%}
|
| 42 |
+
{%- set ns.explicit_think_requested = true -%}
|
| 43 |
+
{%- elif '/no_think' in content['text'] -%}
|
| 44 |
+
{%- set ns.enable_thinking = false -%}
|
| 45 |
+
{%- endif -%}
|
| 46 |
+
{%- endif -%}
|
| 47 |
+
{%- endif -%}
|
| 48 |
+
{%- endfor -%}
|
| 49 |
+
{%- endif -%}
|
| 50 |
+
{%- endfor -%}
|
| 51 |
+
|
| 52 |
+
{# Error out if video is present and reasoning is explicitly requested #}
|
| 53 |
+
{%- if ns.has_video and ns.explicit_think_requested -%}
|
| 54 |
+
{{ raise_exception('Video inputs are not supported with reasoning mode. Please remove /think flag or remove video content.') }}
|
| 55 |
+
{%- endif -%}
|
| 56 |
+
|
| 57 |
+
{# Automatically disable reasoning if video is present (without explicit /think request) #}
|
| 58 |
+
{%- if ns.has_video and not ns.explicit_think_requested -%}
|
| 59 |
+
{%- set ns.enable_thinking = false -%}
|
| 60 |
+
{%- endif -%}
|
| 61 |
+
|
| 62 |
+
{%- if messages[0]['role'] != 'system' -%}
|
| 63 |
+
{{- '<SPECIAL_10>System\n' -}}
|
| 64 |
+
{%- else -%}
|
| 65 |
+
{{- '<SPECIAL_10>System\n' + ns.non_tool_system_content }}
|
| 66 |
+
{%- endif -%}
|
| 67 |
+
|
| 68 |
+
{%- if tools -%}
|
| 69 |
+
{%- if ns.non_tool_system_content != '' -%}
|
| 70 |
+
{{- '\n\n' -}}
|
| 71 |
+
{%- endif -%}
|
| 72 |
+
{{- 'You can use the following tools to assist the user if required:\n' -}}
|
| 73 |
+
{{- '<AVAILABLE_TOOLS>[' -}}
|
| 74 |
+
{%- for tool in tools -%}
|
| 75 |
+
{{- (tool.function if tool.function is defined else tool) | tojson -}}
|
| 76 |
+
{{- ', ' if not loop.last else '' -}}
|
| 77 |
+
{%- endfor -%}
|
| 78 |
+
{{- ']</AVAILABLE_TOOLS>\n\n' -}}
|
| 79 |
+
|
| 80 |
+
{{- 'If you decide to call any tool(s), use the following format:\n' -}}
|
| 81 |
+
{{- '<TOOLCALL>[{"name": "tool_name1", "arguments": "tool_args1"}, ' -}}
|
| 82 |
+
{{- '{"name": "tool_name2", "arguments": "tool_args2"}]</TOOLCALL>\n\n' -}}
|
| 83 |
+
|
| 84 |
+
{{- 'The user will execute tool-calls and return responses from tool(s) in this format:\n' -}}
|
| 85 |
+
{{- '<TOOL_RESPONSE>[{"response": "tool_response1"}, ' -}}
|
| 86 |
+
{{- '{"response": "tool_response2"}]</TOOL_RESPONSE>\n\n' -}}
|
| 87 |
+
|
| 88 |
+
{{- 'Based on the tool responses, you can call additional tools if needed, ' -}}
|
| 89 |
+
{{- 'correct tool calls if any errors are found, or just respond to the user.' -}}
|
| 90 |
+
{%- endif -%}
|
| 91 |
+
{{- '\n' -}}
|
| 92 |
+
|
| 93 |
+
{%- set messages = messages[1:] if messages[0]['role'] == 'system' else messages -%}
|
| 94 |
+
|
| 95 |
+
{# Prevent no user or assistant message #}
|
| 96 |
+
{%- if messages|length == 0 -%}
|
| 97 |
+
{%- set messages = [{'role': 'user', 'content': ''}] -%}
|
| 98 |
+
{%- endif -%}
|
| 99 |
+
|
| 100 |
+
{%- for message in messages %}
|
| 101 |
+
{%- if message['content'] is string -%}
|
| 102 |
+
{%- set msg.content = message['content'].replace('</think>', '<_end_think>').replace('/think', '').replace('/no_think', '').replace('<_end_think>', '</think>').strip() -%}
|
| 103 |
+
{%- else -%}
|
| 104 |
+
{%- set msg.content = '' -%}
|
| 105 |
+
{%- set mm_content = '' -%}
|
| 106 |
+
{%- set counters = namespace(images=0, videos=0) -%}
|
| 107 |
+
|
| 108 |
+
{%- for content in message['content'] -%}
|
| 109 |
+
{%- if content['type'] == 'image' -%}
|
| 110 |
+
{%- set counters.images = counters.images + 1 -%}
|
| 111 |
+
{%- elif content['type'] == 'video' -%}
|
| 112 |
+
{%- set counters.videos = counters.videos + 1 -%}
|
| 113 |
+
{%- elif content['type'] == 'text' -%}
|
| 114 |
+
{%- set msg.content = msg.content + content['text'] -%}
|
| 115 |
+
{%- endif -%}
|
| 116 |
+
{%- endfor -%}
|
| 117 |
+
{%- if '<image>' in msg.content -%}
|
| 118 |
+
{%- set counters.images = 0 -%}
|
| 119 |
+
{%- endif -%}
|
| 120 |
+
{%- if '<video>' in msg.content -%}
|
| 121 |
+
{%- set counters.videos = 0 -%}
|
| 122 |
+
{%- endif -%}
|
| 123 |
+
{%- if counters.images > 1 -%}
|
| 124 |
+
{%- set image_tags = namespace(tags=[]) -%}
|
| 125 |
+
{%- for i in range(counters.images) -%}
|
| 126 |
+
{%- set image_tags.tags = image_tags.tags + ['<image ' + (i + 1)|string + '><image>'] -%}
|
| 127 |
+
{%- endfor -%}
|
| 128 |
+
{%- set mm_content = ' '.join(image_tags.tags) + '\n' -%}
|
| 129 |
+
{%- elif counters.images == 1 -%}
|
| 130 |
+
{%- set mm_content = '<image>\n' -%}
|
| 131 |
+
{%- endif -%}
|
| 132 |
+
{%- set mm_content = mm_content + '<video>\n' * counters.videos -%}
|
| 133 |
+
{%- set msg.content = mm_content + msg.content.lstrip('\n') -%}
|
| 134 |
+
{%- endif -%}
|
| 135 |
+
|
| 136 |
+
{%- if message['role'] == 'user' %}
|
| 137 |
+
{{- '<SPECIAL_11>User\n' + msg.content.replace('</think>', '<_end_think>').replace('/think', '').replace('/no_think', '').replace('<_end_think>', '</think>').strip() + '\n' }}
|
| 138 |
+
{%- elif message['role'] == 'tool' %}
|
| 139 |
+
{%- if loop.first or (messages[loop.index0 - 1].role != 'tool') -%}
|
| 140 |
+
{{- '<SPECIAL_11>User\n' + '<TOOL_RESPONSE>[' }}
|
| 141 |
+
{%- endif -%}
|
| 142 |
+
{{- msg.content -}}
|
| 143 |
+
{{- ', ' if not loop.last and (messages[loop.index0 + 1].role == 'tool') else '' -}}
|
| 144 |
+
{%- if loop.last or (messages[loop.index0 + 1].role != 'tool') -%}
|
| 145 |
+
{{- ']</TOOL_RESPONSE>\n' -}}
|
| 146 |
+
{%- endif -%}
|
| 147 |
+
{%- elif message['role'] == 'assistant' %}
|
| 148 |
+
{%- if '</think>' in msg.content %}
|
| 149 |
+
{%- set msg.content = msg.content.split('</think>')[1].strip() %}
|
| 150 |
+
{%- endif %}
|
| 151 |
+
{{- '<SPECIAL_11>Assistant\n' + msg.content.strip() }}
|
| 152 |
+
{%- if message.tool_calls -%}
|
| 153 |
+
{%- if msg.content.strip() != '' -%}
|
| 154 |
+
{{- '\n\n' -}}
|
| 155 |
+
{%- endif -%}
|
| 156 |
+
{{- '<TOOLCALL>[' -}}
|
| 157 |
+
{%- for call in message.tool_calls -%}
|
| 158 |
+
{%- set fn = call.function if call.function is defined else call -%}
|
| 159 |
+
{{- '{"name": "' + fn.name + '", "arguments": ' -}}
|
| 160 |
+
{%- if fn.arguments is string -%}
|
| 161 |
+
{{- fn.arguments -}}
|
| 162 |
+
{%- else -%}
|
| 163 |
+
{{- fn.arguments | tojson -}}
|
| 164 |
+
{%- endif -%}
|
| 165 |
+
{{- '}' + (', ' if not loop.last else '') -}}
|
| 166 |
+
{%- endfor -%}
|
| 167 |
+
{{- ']</TOOLCALL>' -}}
|
| 168 |
+
{%- endif -%}
|
| 169 |
+
{{- '\n<SPECIAL_12>\n' -}}
|
| 170 |
+
{%- endif %}
|
| 171 |
+
{%- endfor -%}
|
| 172 |
+
{%- if add_generation_prompt %}
|
| 173 |
+
{{- '<SPECIAL_11>Assistant\n' }}
|
| 174 |
+
{%- if ns.enable_thinking is defined and ns.enable_thinking is false %}
|
| 175 |
+
{{- '<think></think>' }}
|
| 176 |
+
{%- else %}
|
| 177 |
+
{{- '<think>\n' }}
|
| 178 |
+
{%- endif %}
|
| 179 |
+
{%- endif %}
|
config.json
ADDED
@@ -0,0 +1,357 @@
| 1 |
+
{
|
| 2 |
+
"architectures": [
|
| 3 |
+
"NemotronH_Nano_VL_V2"
|
| 4 |
+
],
|
| 5 |
+
"auto_map": {
|
| 6 |
+
"AutoConfig": "configuration.NemotronH_Nano_VL_V2_Config",
|
| 7 |
+
"AutoModel": "modeling.NemotronH_Nano_VL_V2",
|
| 8 |
+
"AutoModelForCausalLM": "modeling.NemotronH_Nano_VL_V2"
|
| 9 |
+
},
|
| 10 |
+
"downsample_ratio": 0.5,
|
| 11 |
+
"eos_token_id": 12,
|
| 12 |
+
"force_image_size": 512,
|
| 13 |
+
"image_tag_type": "internvl",
|
| 14 |
+
"img_context_token": "<image>",
|
| 15 |
+
"img_context_token_id": 131072,
|
| 16 |
+
"img_end_token": "</img>",
|
| 17 |
+
"img_start_token": "<img>",
|
| 18 |
+
"llm_config": {
|
| 19 |
+
"architectures": [
|
| 20 |
+
"NemotronHForCausalLM"
|
| 21 |
+
],
|
| 22 |
+
"attention_bias": false,
|
| 23 |
+
"attention_dropout": 0.0,
|
| 24 |
+
"attention_head_dim": 128,
|
| 25 |
+
"auto_map": {
|
| 26 |
+
"AutoConfig": "nvidia/NVIDIA-Nemotron-Nano-12B-v2-Base--configuration_nemotron_h.NemotronHConfig",
|
| 27 |
+
"AutoModelForCausalLM": "nvidia/NVIDIA-Nemotron-Nano-12B-v2-Base--modeling_nemotron_h.NemotronHForCausalLM"
|
| 28 |
+
},
|
| 29 |
+
"chunk_size": 128,
|
| 30 |
+
"conv_kernel": 4,
|
| 31 |
+
"eos_token_id": 12,
|
| 32 |
+
"expand": 2,
|
| 33 |
+
"head_dim": 128,
|
| 34 |
+
"hidden_dropout": 0.0,
|
| 35 |
+
"hidden_size": 5120,
|
| 36 |
+
"hybrid_override_pattern": "M-M-M-M*-M-M-M-M*-M-M-M-M*-M-M-M-M*-M-M-M-M*-M-M-M-M*-M-M-M-M-",
|
| 37 |
+
"initializer_range": 0.02,
|
| 38 |
+
"intermediate_size": 20480,
|
| 39 |
+
"layer_norm_epsilon": 1e-05,
|
| 40 |
+
"mamba_head_dim": 80,
|
| 41 |
+
"mamba_hidden_act": "silu",
|
| 42 |
+
"mamba_num_heads": 128,
|
| 43 |
+
"mamba_proj_bias": false,
|
| 44 |
+
"max_position_embeddings": 131072,
|
| 45 |
+
"mlp_bias": false,
|
| 46 |
+
"mlp_hidden_act": "relu2",
|
| 47 |
+
"model_type": "nemotron_h",
|
| 48 |
+
"n_groups": 8,
|
| 49 |
+
"num_attention_heads": 40,
|
| 50 |
+
"num_hidden_layers": 62,
|
| 51 |
+
"num_key_value_heads": 8,
|
| 52 |
+
"num_logits_to_keep": 1,
|
| 53 |
+
"rescale_prenorm_residual": true,
|
| 54 |
+
"residual_in_fp32": false,
|
| 55 |
+
"rms_norm_eps": 1e-05,
|
| 56 |
+
"sliding_window": null,
|
| 57 |
+
"ssm_state_size": 128,
|
| 58 |
+
"time_step_floor": 0.0001,
|
| 59 |
+
"time_step_limit": [
|
| 60 |
+
0.0,
|
| 61 |
+
Infinity
|
| 62 |
+
],
|
| 63 |
+
"time_step_max": 0.1,
|
| 64 |
+
"time_step_min": 0.001,
|
| 65 |
+
"time_step_rank": 256,
|
| 66 |
+
"torch_dtype": "bfloat16",
|
| 67 |
+
"use_bias": false,
|
| 68 |
+
"use_cache": true,
|
| 69 |
+
"use_conv_bias": true,
|
| 70 |
+
"use_mamba_kernels": true,
|
| 71 |
+
"vocab_size": 132096
|
| 72 |
+
},
|
| 73 |
+
"max_sequence_length": 131072,
|
| 74 |
+
"model_type": "NemotronH_Nano_VL_V2",
|
| 75 |
+
"norm_mean": [
|
| 76 |
+
0.48145466,
|
| 77 |
+
0.4578275,
|
| 78 |
+
0.40821073
|
| 79 |
+
],
|
| 80 |
+
"norm_std": [
|
| 81 |
+
0.26862954,
|
| 82 |
+
0.26130258,
|
| 83 |
+
0.27577711
|
| 84 |
+
],
|
| 85 |
+
"patch_size": 16,
|
| 86 |
+
"projector_hidden_size": 20480,
|
| 87 |
+
"ps_version": "v2",
|
| 88 |
+
"template": "n5h_5p5_nanov2",
|
| 89 |
+
"torch_dtype": "bfloat16",
|
| 90 |
+
"transformers_version": "4.53.3",
|
| 91 |
+
"use_thumbnail": true,
|
| 92 |
+
"video_context_token": "<video>",
|
| 93 |
+
"video_context_token_id": 131081,
|
| 94 |
+
"video_pruning_rate": 0.7,
|
| 95 |
+
"vision_config": {
|
| 96 |
+
"adaptor_configs": {},
|
| 97 |
+
"adaptor_names": null,
|
| 98 |
+
"architectures": [
|
| 99 |
+
"RADIOModel"
|
| 100 |
+
],
|
| 101 |
+
"args": {
|
| 102 |
+
"aa": null,
|
| 103 |
+
"amp": true,
|
| 104 |
+
"amp_dtype": "bfloat16",
|
| 105 |
+
"amp_impl": "native",
|
| 106 |
+
"aug_repeats": 0,
|
| 107 |
+
"aug_splits": 0,
|
| 108 |
+
"bn_eps": null,
|
| 109 |
+
"bn_momentum": null,
|
| 110 |
+
"cache_dir": null,
|
| 111 |
+
"channels_last": false,
|
| 112 |
+
"checkpoint_hist": 10,
|
| 113 |
+
"chk_keep_forever": 100,
|
| 114 |
+
"class_map": "",
|
| 115 |
+
"clip_grad": null,
|
| 116 |
+
"clip_mode": "norm",
|
| 117 |
+
"cls_token_per_teacher": true,
|
| 118 |
+
"coco_annotations_file": "/datasets/coco2017-adlsa/annotations/captions_val2017.json",
|
| 119 |
+
"coco_image_dir": "/datasets/coco2017-adlsa/val2017",
|
| 120 |
+
"color_jitter": 0.4,
|
| 121 |
+
"cooldown_epochs": 0,
|
| 122 |
+
"cpe_max_size": 2048,
|
| 123 |
+
"crd_loss": false,
|
| 124 |
+
"crd_loss_weight": 0.8,
|
| 125 |
+
"crop_pct": null,
|
| 126 |
+
"cutmix": 0.0,
|
| 127 |
+
"cutmix_minmax": null,
|
| 128 |
+
"dataset_download": false,
|
| 129 |
+
"debug_full_knn": false,
|
| 130 |
+
"decay_epochs": 90,
|
| 131 |
+
"decay_milestones": [
|
| 132 |
+
90,
|
| 133 |
+
180,
|
| 134 |
+
270
|
| 135 |
+
],
|
| 136 |
+
"decay_rate": 0.1,
|
| 137 |
+
"depchain": true,
|
| 138 |
+
"dist_bn": "reduce",
|
| 139 |
+
"dist_norm_weight": 0.0,
|
| 140 |
+
"distributed": true,
|
| 141 |
+
"drop": 0.0,
|
| 142 |
+
"drop_block": null,
|
| 143 |
+
"drop_connect": null,
|
| 144 |
+
"drop_path": null,
|
| 145 |
+
"dtype": "bfloat16",
|
| 146 |
+
"epoch_repeats": 0.0,
|
| 147 |
+
"eval": false,
|
| 148 |
+
"eval_metric": "knn_top1",
|
| 149 |
+
"eval_teacher": false,
|
| 150 |
+
"eval_teacher_only": false,
|
| 151 |
+
"eval_throughput": false,
|
| 152 |
+
"fast_norm": false,
|
| 153 |
+
"fd_loss_fn": "MSE",
|
| 154 |
+
"feature_normalization": "SHIP_NORM",
|
| 155 |
+
"feature_summarizer": "cls_token",
|
| 156 |
+
"feature_upscale_factor": null,
|
| 157 |
+
"force_new_wandb_id": false,
|
| 158 |
+
"force_spectral_reparam": true,
|
| 159 |
+
"freeze_bn": false,
|
| 160 |
+
"fsdp": false,
|
| 161 |
+
"fuser": "",
|
| 162 |
+
"gp": null,
|
| 163 |
+
"grad_accum_steps": 1,
|
| 164 |
+
"grad_checkpointing": false,
|
| 165 |
+
"head_init_bias": null,
|
| 166 |
+
"head_init_scale": null,
|
| 167 |
+
"head_warmup": 5,
|
| 168 |
+
"head_weight_decay": 0.001,
|
| 169 |
+
"hflip": 0.5,
|
| 170 |
+
"img_size": null,
|
| 171 |
+
"in_chans": null,
|
| 172 |
+
"initial_checkpoint": null,
|
| 173 |
+
"input_size": null,
|
| 174 |
+
"interpolation": "",
|
| 175 |
+
"layer_decay": null,
|
| 176 |
+
"local_rank": 0,
|
| 177 |
+
"log_interval": 50,
|
| 178 |
+
"log_mlflow": false,
|
| 179 |
+
"log_wandb": true,
|
| 180 |
+
"loss_auto_balance": false,
|
| 181 |
+
"lr_base": 0.1,
|
| 182 |
+
"lr_base_scale": "",
|
| 183 |
+
"lr_base_size": 256,
|
| 184 |
+
"lr_cycle_decay": 0.5,
|
| 185 |
+
"lr_cycle_limit": 1,
|
| 186 |
+
"lr_cycle_mul": 1.0,
|
| 187 |
+
"lr_k_decay": 1.0,
|
| 188 |
+
"lr_noise": null,
|
| 189 |
+
"lr_noise_pct": 0.67,
|
| 190 |
+
"lr_noise_std": 1.0,
|
| 191 |
+
"mean": null,
|
| 192 |
+
"mesa": false,
|
| 193 |
+
"min_lr": 0,
|
| 194 |
+
"mixup": 0.0,
|
| 195 |
+
"mixup_mode": "batch",
|
| 196 |
+
"mixup_off_epoch": 0,
|
| 197 |
+
"mixup_prob": 1.0,
|
| 198 |
+
"mixup_switch_prob": 0.5,
|
| 199 |
+
"mlp_hidden_size": 1520,
|
| 200 |
+
"mlp_num_inner": 3,
|
| 201 |
+
"mlp_version": "v2",
|
| 202 |
+
"model": "vit_huge_patch16_224",
|
| 203 |
+
"model_kwargs": {},
|
| 204 |
+
"model_norm": false,
|
| 205 |
+
"momentum": 0.9,
|
| 206 |
+
"no_aug": false,
|
| 207 |
+
"no_ddp_bb": true,
|
| 208 |
+
"no_prefetcher": false,
|
| 209 |
+
"no_resume_opt": false,
|
| 210 |
+
"num_classes": null,
|
| 211 |
+
"opt_betas": null,
|
| 212 |
+
"opt_eps": null,
|
| 213 |
+
"patience_epochs": 10,
|
| 214 |
+
"pin_mem": false,
|
| 215 |
+
"prefetcher": true,
|
| 216 |
+
"pretrained": false,
|
| 217 |
+
"rank": 0,
|
| 218 |
+
"ratio": [
|
| 219 |
+
0.75,
|
| 220 |
+
1.3333333333333333
|
| 221 |
+
],
|
| 222 |
+
"recount": 1,
|
| 223 |
+
"recovery_interval": 0,
|
| 224 |
+
"register_multiple": 16,
|
| 225 |
+
"remode": "pixel",
|
| 226 |
+
"reprob": 0.0,
|
| 227 |
+
"reset_loss_state": false,
|
| 228 |
+
"resplit": false,
|
| 229 |
+
"save_images": false,
|
| 230 |
+
"scale": [
|
| 231 |
+
0.5,
|
| 232 |
+
1.0
|
| 233 |
+
],
|
| 234 |
+
"sched": "cosine",
|
| 235 |
+
"seed": 42,
|
| 236 |
+
"smoothing": 0.1,
|
| 237 |
+
"spectral_heads": false,
|
| 238 |
+
"spectral_reparam": false,
|
| 239 |
+
"split_bn": false,
|
| 240 |
+
"start_epoch": null,
|
| 241 |
+
"std": null,
|
| 242 |
+
"stream_teachers": true,
|
| 243 |
+
"sync_bn": false,
|
| 244 |
+
"synchronize_step": false,
|
| 245 |
+
"teachers": [
|
| 246 |
+
{
|
| 247 |
+
"fd_normalize": false,
|
| 248 |
+
"feature_distillation": true,
|
| 249 |
+
"input_size": 378,
|
| 250 |
+
"model": "ViT-H-14-378-quickgelu",
|
| 251 |
+
"name": "clip",
|
| 252 |
+
"pretrained": "dfn5b",
|
| 253 |
+
"type": "open_clip",
|
| 254 |
+
"use_summary": true
|
| 255 |
+
},
|
| 256 |
+
{
|
| 257 |
+
"fd_normalize": false,
|
| 258 |
+
"feature_distillation": true,
|
| 259 |
+
"input_size": 378,
|
| 260 |
+
"model": "ViT-SO400M-14-SigLIP-384",
|
| 261 |
+
"name": "siglip",
|
| 262 |
+
"pretrained": "webli",
|
| 263 |
+
"type": "open_clip",
|
| 264 |
+
"use_summary": true
|
| 265 |
+
},
|
| 266 |
+
{
|
| 267 |
+
"fd_normalize": false,
|
| 268 |
+
"feature_distillation": true,
|
| 269 |
+
"input_size": 378,
|
| 270 |
+
"model": "dinov2_vitg14_reg",
|
| 271 |
+
"name": "dino_v2",
|
| 272 |
+
"type": "dino_v2",
|
| 273 |
+
"use_summary": true
|
| 274 |
+
},
|
| 275 |
+
{
|
| 276 |
+
"fd_normalize": false,
|
| 277 |
+
"feature_distillation": true,
|
| 278 |
+
"input_size": 1024,
|
| 279 |
+
"model": "vit-h",
|
| 280 |
+
"name": "sam",
|
| 281 |
+
"type": "sam",
|
| 282 |
+
"use_summary": false
|
| 283 |
+
}
|
| 284 |
+
],
|
| 285 |
+
"torchcompile": null,
|
| 286 |
+
"torchscript": false,
|
| 287 |
+
"train_interpolation": "random",
|
| 288 |
+
"train_split": "train",
|
| 289 |
+
"tta": 0,
|
| 290 |
+
"use_coco": false,
|
| 291 |
+
"use_multi_epochs_loader": false,
|
| 292 |
+
"val_ema_only": false,
|
| 293 |
+
"val_split": "val",
|
| 294 |
+
"vflip": 0.0,
|
| 295 |
+
"vitdet_version": 1,
|
| 296 |
+
"wandb_entity": "",
|
| 297 |
+
"wandb_job_type": "",
|
| 298 |
+
"wandb_name": "",
|
| 299 |
+
"wandb_project": "",
|
| 300 |
+
"warmup_lr": 1e-05,
|
| 301 |
+
"warmup_prefix": false,
|
| 302 |
+
"worker_seeding": "all",
|
| 303 |
+
"workers": 8,
|
| 304 |
+
"world_size": 256
|
| 305 |
+
},
|
| 306 |
+
"auto_map": {
|
| 307 |
+
"AutoConfig": "nvidia/C-RADIOv2-H--hf_model.RADIOConfig",
|
| 308 |
+
"AutoModel": "nvidia/C-RADIOv2-H--hf_model.RADIOModel"
|
| 309 |
+
},
|
| 310 |
+
"feature_normalizer_config": null,
|
| 311 |
+
"inter_feature_normalizer_config": null,
|
| 312 |
+
"max_resolution": 2048,
|
| 313 |
+
"model_type": "",
|
| 314 |
+
"patch_size": 16,
|
| 315 |
+
"preferred_resolution": [
|
| 316 |
+
768,
|
| 317 |
+
768
|
| 318 |
+
],
|
| 319 |
+
"torch_dtype": "bfloat16",
|
| 320 |
+
"use_flash_attn": false,
|
| 321 |
+
"version": "radio_v2.5-h",
|
| 322 |
+
"vitdet_window_size": null
|
| 323 |
+
},
|
| 324 |
+
"vit_hidden_size": 1280,
|
| 325 |
+
"quantization_config": {
|
| 326 |
+
"config_groups": {
|
| 327 |
+
"group_0": {
|
| 328 |
+
"input_activations": {
|
| 329 |
+
"dynamic": false,
|
| 330 |
+
"num_bits": 8,
|
| 331 |
+
"type": "float"
|
| 332 |
+
},
|
| 333 |
+
"weights": {
|
| 334 |
+
"dynamic": false,
|
| 335 |
+
"num_bits": 8,
|
| 336 |
+
"type": "float"
|
| 337 |
+
},
|
| 338 |
+
"targets": [
|
| 339 |
+
"Linear"
|
| 340 |
+
]
|
| 341 |
+
}
|
| 342 |
+
},
|
| 343 |
+
"ignore": [
|
| 344 |
+
"model.layers.language_model.lm_head",
|
| 345 |
+
"model.layers.mlp1*",
|
| 346 |
+
"model.layers.*.conv1d*",
|
| 347 |
+
"model.layers.vision_model*",
|
| 348 |
+
"lm_head"
|
| 349 |
+
],
|
| 350 |
+
"quant_algo": "FP8",
|
| 351 |
+
"producer": {
|
| 352 |
+
"name": "modelopt",
|
| 353 |
+
"version": "0.37.0.dev5+g76fb12d47.d20250905"
|
| 354 |
+
},
|
| 355 |
+
"quant_method": "modelopt"
|
| 356 |
+
}
|
| 357 |
+
}
|
configuration.py
ADDED
@@ -0,0 +1,57 @@
# --------------------------------------------------------
# Adapted from https://huggingface.co/OpenGVLab/InternVL2-Llama3-76B under MIT License
# LICENSE is in incl_licenses directory.
# --------------------------------------------------------

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
from .configuration_nemotron_h import NemotronHConfig
from .configuration_radio import RADIOConfig

logger = logging.get_logger(__name__)


class NemotronH_Nano_VL_V2_Config(PretrainedConfig):
    model_type = 'NemotronH_Nano_VL_V2'
    is_composition = True

    def __init__(
        self,
        vision_config=None,
        llm_config=None,
        force_image_size=None,
        downsample_ratio=0.5,
        template=None,
        ps_version='v1',
        image_tag_type="internvl",
        projector_hidden_size=4096,
        vit_hidden_size=1280,
        attn_implementation="flash_attention_2",
        video_pruning_rate: float = 0.0,
        **kwargs
    ):
        super().__init__(**kwargs)

        if vision_config is not None:
            self.vision_config = RADIOConfig(**vision_config)
        else:
            self.vision_config = RADIOConfig()

        # Handle both cases: when loading from JSON (llm_config is dict) and when called internally by transformers (llm_config is None)
        if llm_config is not None:
            self.llm_config = NemotronHConfig(**llm_config)
        else:
            self.llm_config = NemotronHConfig()

        # Assign configuration values
        self.force_image_size = force_image_size
        self.downsample_ratio = downsample_ratio
        self.template = template  # TODO move out of here and into the tokenizer
        self.ps_version = ps_version  # Pixel shuffle version
        self.image_tag_type = image_tag_type  # TODO: into the tokenizer too?
        self.projector_hidden_size = projector_hidden_size
        self.vit_hidden_size = vit_hidden_size
        self.video_pruning_rate = video_pruning_rate

        self._attn_implementation = attn_implementation
        self.vision_config.use_flash_attn = self._attn_implementation is not None and "flash_attention" in self._attn_implementation
        self.llm_config._attn_implementation = self._attn_implementation
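For illustration, the composed configuration above can be inspected by loading it through `AutoConfig` with `trust_remote_code`, which resolves `NemotronH_Nano_VL_V2_Config` via the `auto_map` entry in `config.json`. This is a hypothetical usage snippet, not part of the repository; the repo id is taken from the Quick Start section.

```python
# Hypothetical usage (not part of this repository): load the composed config via
# the auto_map in config.json and inspect the nested sub-configurations.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(
    "nvidia/Nemotron-Nano-VL-12B-V2-FP8",
    trust_remote_code=True,  # required so the custom config class above is used
)
print(type(config).__name__)            # NemotronH_Nano_VL_V2_Config
print(config.llm_config.hidden_size)    # 5120, from the nested NemotronHConfig
print(config.vision_config.patch_size)  # 16, from the nested RADIOConfig
```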
configuration_nemotron_h.py
ADDED
@@ -0,0 +1,245 @@
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2024 AI21 Labs Ltd. and the HuggingFace Inc. team. All rights reserved.
|
| 3 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
"""NemotronH model configuration"""
|
| 17 |
+
|
| 18 |
+
import re
|
| 19 |
+
|
| 20 |
+
from transformers.configuration_utils import PretrainedConfig
|
| 21 |
+
from transformers.utils import logging
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
logger = logging.get_logger(__name__)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class NemotronHConfig(PretrainedConfig):
|
| 28 |
+
r"""
|
| 29 |
+
This is the configuration class to store the configuration of a [`NemotronHModel`]. It is used to instantiate a
|
| 30 |
+
NemotronH model according to the specified arguments, defining the model architecture. Instantiating a configuration
|
| 31 |
+
with the defaults will yield a similar configuration to that of the NemotronH-v0.1 model.
|
| 32 |
+
|
| 33 |
+
[todo](todo)
|
| 34 |
+
|
| 35 |
+
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
| 36 |
+
documentation from [`PretrainedConfig`] for more information.
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
Args:
|
| 40 |
+
vocab_size (`int`, *optional*, defaults to 131072):
|
| 41 |
+
Vocabulary size of the NemotronH model. Defines the number of different tokens that can be represented by the
|
| 42 |
+
`inputs_ids` passed when calling [`NemotronHModel`]
|
| 43 |
+
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
|
| 44 |
+
Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the
|
| 45 |
+
model has a output word embedding layer.
|
| 46 |
+
hidden_size (`int`, *optional*, defaults to 4096):
|
| 47 |
+
Dimension of the hidden representations.
|
| 48 |
+
intermediate_size (`int`, *optional*, defaults to 21504):
|
| 49 |
+
Dimension of the MLP representations.
|
| 50 |
+
num_hidden_layers (`int`, *optional*, defaults to 52):
|
| 51 |
+
Number of hidden layers in the Transformer encoder.
|
| 52 |
+
hybrid_override_pattern (`str`, *optional*, defaults to `"M-M-M-M*-M-M-M-M-M*-M-M-M-M-M*-M-M-M-M-M*-M-M-M-M-M-"`):
|
| 53 |
+
The pattern of the hybrid model. The pattern is a string of characters where each character represents M: Mamba2, *: Attention, -: MLP
|
| 54 |
+
num_attention_heads (`int`, *optional*, defaults to 32):
|
| 55 |
+
Number of attention heads for each attention layer in the Transformer encoder.
|
| 56 |
+
attention_head_dim (`int`, *optional*, defaults to 128):
|
| 57 |
+
Dimension of each attention head.
|
| 58 |
+
num_key_value_heads (`int`, *optional*, defaults to 8):
|
| 59 |
+
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
|
| 60 |
+
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
|
| 61 |
+
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used.
|
| 62 |
+
mlp_hidden_act (`str`, *optional*, defaults to "relu2"):
|
| 63 |
+
The non-linear activation function in the MLP layers.
|
| 64 |
+
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in attention layers.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in MLP layers.
        use_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the model.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        residual_in_fp32 (`bool`, *optional*, defaults to `False`):
            Whether or not residuals should be in `float32`. If set to `False`, residuals will keep the same `dtype` as the rest of the model.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        num_logits_to_keep (`int` or `None`, *optional*, defaults to 1):
            Number of prompt logits to calculate during generation. If `None`, all logits will be calculated. If an
            integer value, only the last `num_logits_to_keep` logits will be calculated.
        pad_token_id (`int`, *optional*, defaults to 0):
            The id of the padding token.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*, defaults to 2):
            The id of the "end-of-sequence" token.
        sliding_window (`int`, *optional*, defaults to `None`):
            Sliding window attention window size.
        max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model might ever be used with.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        hidden_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the hidden states.
        use_mamba_kernels (`bool`, *optional*, defaults to `True`):
            Flag indicating whether or not to use the fast mamba kernels. These are available only if `mamba-ssm` and
            `causal-conv1d` are installed, and the mamba modules are running on a CUDA device.
        ssm_state_size (`int`, *optional*, defaults to 128):
            The dimension of the mamba state space latents.
        mamba_num_heads (`int`, *optional*, defaults to 128):
            Number of heads in Mamba layers.
        mamba_n_groups (`int`, *optional*, defaults to 8):
            Number of groups in Mamba layers.
        mamba_head_dim (`int`, *optional*, defaults to 64):
            Dimension of each Mamba head.
        mamba_d_conv (`int`, *optional*, defaults to 4):
            The size of the mamba convolution kernel.
        mamba_expand (`int`, *optional*, defaults to 2):
            Expanding factor used to determine the mamba intermediate size.
        mamba_hidden_act (`str`, *optional*, defaults to `"silu"`):
            The non-linear activation function in the Mamba layers.
        mamba_dt_min (`float`, *optional*, defaults to 0.001):
            Minimum value for the time step in Mamba.
        mamba_dt_max (`float`, *optional*, defaults to 0.1):
            Maximum value for the time step in Mamba.
        mamba_dt_limit (`tuple`, *optional*, defaults to `(0.0, float("inf"))`):
            Limits for the time step in Mamba.
        mamba_dt_init_floor (`float`, *optional*, defaults to 1e-4):
            Floor value for time step initialization in Mamba.
        mamba_conv_bias (`bool`, *optional*, defaults to `True`):
            Whether to use bias in the convolution layer of the mamba mixer block.
        mamba_proj_bias (`bool`, *optional*, defaults to `False`):
            Whether to use bias in the input and output projections of the mamba mixer block.
        mamba_chunk_size (`int`, *optional*, defaults to 256):
            Size of chunks for Mamba processing.
        rescale_prenorm_residual (`bool`, *optional*, defaults to `True`):
            Whether to rescale the pre-normalization residual connections.
    """

    model_type = "nemotron_h"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=131072,
        tie_word_embeddings=False,
        hidden_size=4096,
        intermediate_size=21504,
        num_hidden_layers=52,
        hybrid_override_pattern="M-M-M-M*-M-M-M-M-M*-M-M-M-M-M*-M-M-M-M-M*-M-M-M-M-M-",
        num_attention_heads=32,
        # attention_head_dim=128,
        head_dim=128,
        num_key_value_heads=8,  # nemo: num_query_groups
        mlp_hidden_act="relu2",
        attention_bias=False,
        mlp_bias=False,
        use_bias=False,
        initializer_range=0.02,  # nemo: init_method_std
        layer_norm_epsilon=1e-5,  # nemo: layernorm_epsilon
        residual_in_fp32=False,  # Megatron Core default value
        use_cache=True,
        num_logits_to_keep=1,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sliding_window=None,
        max_position_embeddings=4096,
        attention_dropout=0.0,
        hidden_dropout=0.0,  # * ADDED
        use_mamba_kernels=True,
        ssm_state_size=128,  # mamba_state_size
        mamba_num_heads=128,
        mamba_n_groups=8,  # nemo: mamba_ssm_ngroups = num_heads
        mamba_head_dim=64,
        mamba_d_conv=4,
        mamba_expand=2,
        mamba_hidden_act="silu",
        mamba_dt_min=0.001,
        mamba_dt_max=0.1,
        mamba_dt_limit=(0.0, float("inf")),
        mamba_dt_init_floor=1e-4,
        mamba_conv_bias=True,
        mamba_proj_bias=False,
        mamba_chunk_size=256,
        rescale_prenorm_residual=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.tie_word_embeddings = tie_word_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.hybrid_override_pattern = hybrid_override_pattern
        self.num_attention_heads = num_attention_heads
        # self.attention_head_dim = attention_head_dim
        self.head_dim = head_dim
        self.sliding_window = sliding_window
        self.max_position_embeddings = max_position_embeddings
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout

        # Validate hybrid_override_pattern
        # M: Mamba2, *: Attention, -: MLP
        assert len(self.hybrid_override_pattern) == self.num_hidden_layers, \
            "hybrid_override_pattern must have the same length as num_hidden_layers"
        assert re.match(r"^[*-M]+$", self.hybrid_override_pattern), \
            "hybrid_override_pattern must only contain characters 'M', '*', or '-'"

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.mlp_hidden_act = mlp_hidden_act
        self.attention_bias = attention_bias
        self.mlp_bias = mlp_bias
        self.use_bias = use_bias
        self.initializer_range = initializer_range
        self.layer_norm_epsilon = layer_norm_epsilon
        self.residual_in_fp32 = residual_in_fp32

        self.use_cache = use_cache
        self.num_logits_to_keep = num_logits_to_keep

        self.use_mamba_kernels = use_mamba_kernels
        self.n_groups = mamba_n_groups
        self.mamba_head_dim = mamba_head_dim
        self.ssm_state_size = ssm_state_size
        self.mamba_num_heads = mamba_num_heads
        self.conv_kernel = mamba_d_conv
        self.expand = mamba_expand
        self.mamba_hidden_act = mamba_hidden_act
        self.time_step_min = mamba_dt_min
        self.time_step_max = mamba_dt_max
        self.time_step_limit = mamba_dt_limit
        self.time_step_floor = mamba_dt_init_floor
        self.use_conv_bias = mamba_conv_bias
        self.mamba_proj_bias = mamba_proj_bias
        self.chunk_size = mamba_chunk_size
        self.rescale_prenorm_residual = rescale_prenorm_residual

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    @property
    def layers_block_type(self):
        return [
            "mamba" if self.hybrid_override_pattern[i] == "M"
            else "attention" if self.hybrid_override_pattern[i] == "*"
            else "mlp"
            for i in range(self.num_hidden_layers)
        ]
configuration_radio.py
ADDED
@@ -0,0 +1,152 @@
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

from dataclasses import dataclass
from typing import Optional, NamedTuple, Union, List, Dict

from transformers import PretrainedConfig


class Resolution(NamedTuple):
    height: int
    width: int


@dataclass
class RadioResource:
    url: str
    patch_size: int
    max_resolution: int
    preferred_resolution: Resolution
    vitdet_num_windowed: Optional[int] = None
    vitdet_num_global: Optional[int] = None


RESOURCE_MAP = {
    # RADIOv2.5
    "radio_v2.5-b": RadioResource(
        "https://huggingface.co/nvidia/RADIO/resolve/main/radio-v2.5-b_half.pth.tar?download=true",
        patch_size=16,
        max_resolution=2048,
        preferred_resolution=(768, 768),
        vitdet_num_global=4,
    ),
    "radio_v2.5-l": RadioResource(
        "https://huggingface.co/nvidia/RADIO/resolve/main/radio-v2.5-l_half.pth.tar?download=true",
        patch_size=16,
        max_resolution=2048,
        preferred_resolution=(768, 768),
        vitdet_num_global=4,
    ),
    "radio_v2.5-h": RadioResource(
        "https://huggingface.co/nvidia/RADIO/resolve/main/radio_v2.5-h.pth.tar?download=true",
        patch_size=16,
        max_resolution=2048,
        preferred_resolution=(768, 768),
        vitdet_num_global=4,
    ),
    "radio_v2.5-h-norm": RadioResource(
        "https://huggingface.co/nvidia/RADIO/resolve/main/radio_v2.5-h-norm.pth.tar?download=true",
        patch_size=16,
        max_resolution=2048,
        preferred_resolution=(768, 768),
        vitdet_num_global=4,
    ),
    "radio_v2.5-g": RadioResource(
        "https://huggingface.co/nvidia/RADIO/resolve/main/radio_v2.5-g.pth.tar?download=true",
        patch_size=14,
        max_resolution=1792,
        preferred_resolution=(896, 896),
        vitdet_num_global=8,
    ),
    # RADIO
    "radio_v2.1": RadioResource(
        "https://huggingface.co/nvidia/RADIO/resolve/main/radio_v2.1_bf16.pth.tar?download=true",
        patch_size=16,
        max_resolution=2048,
        preferred_resolution=Resolution(432, 432),
        vitdet_num_windowed=5,
    ),
    "radio_v2": RadioResource(
        "https://huggingface.co/nvidia/RADIO/resolve/main/radio_v2.pth.tar?download=true",
        patch_size=16,
        max_resolution=2048,
        preferred_resolution=Resolution(432, 432),
        vitdet_num_windowed=5,
    ),
    "radio_v1": RadioResource(
        "https://huggingface.co/nvidia/RADIO/resolve/main/radio_v1.pth.tar?download=true",
        patch_size=14,
        max_resolution=1050,
        preferred_resolution=Resolution(378, 378),
    ),
    # E-RADIO
    "e-radio_v2": RadioResource(
        "https://huggingface.co/nvidia/RADIO/resolve/main/eradio_v2.pth.tar?download=true",
        patch_size=16,
        max_resolution=2048,
        preferred_resolution=Resolution(512, 512),
    ),
    # C-RADIO
    "c-radio_v2.5-g": RadioResource(
        "https://huggingface.co/nvidia/C-RADIOv2-g/resolve/main/c-radio_v2-g_half.pth.tar",
        patch_size=16,
        max_resolution=2048,
        preferred_resolution=(768, 768),
        vitdet_num_global=8,
    ),
    "c-radio_v3-l": RadioResource(
        # NOTE: Currently, this model cannot be loaded via TorchHub. Instead, use the transformers API at https://huggingface.co/nvidia/C-RADIOv3-L
        # and accept the license terms.
        "https://huggingface.co/nvidia/C-RADIOv3-L/resolve/main/c-radio-v3_l_half.pth.tar?download=true",
        patch_size=16,
        max_resolution=2048,
        preferred_resolution=Resolution(512, 512),
    ),
}

DEFAULT_VERSION = "radio_v2.5-h"


class RADIOConfig(PretrainedConfig):
    """Pretrained Hugging Face configuration for RADIO models."""

    def __init__(
        self,
        args: Optional[dict] = None,
        version: Optional[str] = DEFAULT_VERSION,
        patch_size: Optional[int] = None,
        max_resolution: Optional[int] = None,
        preferred_resolution: Optional[Resolution] = None,
        adaptor_names: Union[str, List[str]] = None,
        adaptor_configs: Dict[str, Dict[str, int]] = None,
        vitdet_window_size: Optional[int] = None,
        feature_normalizer_config: Optional[dict] = None,
        inter_feature_normalizer_config: Optional[dict] = None,
        **kwargs,
    ):
        self.args = args
        for field in ["dtype", "amp_dtype"]:
            if self.args is not None and field in self.args:
                # Convert to a string in order to make it serializable.
                # For example, for torch.float32 we will store "float32",
                # for "bfloat16" we will store "bfloat16".
                self.args[field] = str(args[field]).split(".")[-1]
        self.version = version
        resource = RESOURCE_MAP[version]
        self.patch_size = patch_size or resource.patch_size
        self.max_resolution = max_resolution or resource.max_resolution
        self.preferred_resolution = (
            preferred_resolution or resource.preferred_resolution
        )
        self.adaptor_names = adaptor_names
        self.adaptor_configs = adaptor_configs
        self.vitdet_window_size = vitdet_window_size
        self.feature_normalizer_config = feature_normalizer_config
        self.inter_feature_normalizer_config = inter_feature_normalizer_config
        super().__init__(**kwargs)
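A brief usage sketch of how this configuration resolves defaults from RESOURCE_MAP (assuming the file is importable as `configuration_radio`; not part of the checked-in file):

from configuration_radio import RADIOConfig

# Unspecified fields fall back to the RESOURCE_MAP entry for the chosen version.
cfg = RADIOConfig(version="radio_v2.5-h")
print(cfg.patch_size)            # 16, from RESOURCE_MAP["radio_v2.5-h"]
print(cfg.max_resolution)        # 2048
print(cfg.preferred_resolution)  # (768, 768)

# Explicit arguments override the resource defaults.
cfg = RADIOConfig(version="radio_v2.5-h", max_resolution=1024)
print(cfg.max_resolution)        # 1024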
evs.py
ADDED
@@ -0,0 +1,73 @@
import torch
from typing import Tuple


class EfficientVideoSampling:
    @staticmethod
    def compute_retention_mask(
        *,
        video_embeds: torch.FloatTensor,
        thw: torch.LongTensor,
        spatial_merge_size: int,
        q: float,
    ):
        """
        Computes the retention mask for video embeddings based on the grid dimensions.

        Args:
            video_embeds (`torch.FloatTensor` of shape `(T * H * W, hidden_size)`):
                The video embeddings to compute the retention mask for.
            thw (`torch.LongTensor` of shape `(3)`):
                The temporal, height and width of the feature grid of each video in the LLM.
            spatial_merge_size (`int`): The spatial merge size of the video embeddings.
                If embeddings will be downsampled *later*, this should be the downsampling factor.
            q (`float`): Pruning rate factor, i.e. the fraction of tokens to prune (remove).

        Returns:
            `torch.Tensor`: The retention mask for the video embeddings (T * H * W).
                1 for tokens to keep, 0 for tokens to prune.
        """
        T, H, W = thw

        # Rearrange "(T H W) C -> T H W C"; use reshape instead of einops to avoid graph breaks.
        video_embeds = video_embeds.reshape(
            T, H // spatial_merge_size, W // spatial_merge_size, video_embeds.size(-1)
        )

        # Core EVS: per-location dissimilarity between each frame and the previous frame.
        similarity = torch.nn.functional.cosine_similarity(
            video_embeds[1:, ...], video_embeds[:-1, ...], dim=-1
        )
        dissimilarity = 1 - similarity

        # Always ensure we include all tokens from the first frame.
        dissimilarity = torch.cat(
            [255 * torch.ones_like(video_embeds[:1, :, :, 0]), dissimilarity], dim=0
        )
        dissimilarity_flat = dissimilarity.view(-1)

        min_num_tokens = (H // spatial_merge_size) * (W // spatial_merge_size)  # a single frame
        evs_num_tokens = int(T * min_num_tokens * (1 - q))
        num_tokens_to_keep = max(min_num_tokens, evs_num_tokens)

        order = torch.argsort(dissimilarity_flat, dim=-1, descending=True, stable=True)
        topk_indices = order[:num_tokens_to_keep]

        retention_mask = torch.zeros_like(dissimilarity_flat, dtype=torch.bool)
        retention_mask[topk_indices] = True
        retention_mask = retention_mask.reshape(dissimilarity.size())

        mask = retention_mask.view(-1)  # "T H W -> (T H W)"
        return mask
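A small, self-contained sketch of calling `compute_retention_mask` on dummy features (assuming the module above is importable as `evs`; shapes follow the docstring, with tokens flattened over the merged grid):

import torch

from evs import EfficientVideoSampling  # assumed module name for the file above

T, H, W, merge, hidden = 4, 32, 32, 2, 64
# Tokens are flattened over (T, H // merge, W // merge), as the docstring expects.
video_embeds = torch.randn(T * (H // merge) * (W // merge), hidden)
thw = torch.tensor([T, H, W])

mask = EfficientVideoSampling.compute_retention_mask(
    video_embeds=video_embeds, thw=thw, spatial_merge_size=merge, q=0.5
)
print(mask.shape, mask.sum().item())  # torch.Size([1024]) 512 -> half the tokens kept for q=0.5
kept = video_embeds[mask]             # pruned token sequence that would be passed on to the LLM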
explainability.md
ADDED
@@ -0,0 +1,15 @@
Field | Response
:------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------
Intended Task/Domain: | Visual Question Answering
Model Type: | Transformer
Intended Users: | Individuals and businesses that need to process documents such as invoices, receipts, and manuals. Also, users who are building multi-modal agents and RAG systems.
Output: | Text
Tools used to evaluate datasets to identify synthetic data and ensure data authenticity: | We used a Gemma-3 4B-based filtering model fine-tuned on [Nemotron Content Safety Dataset v2](https://huggingface.co/datasets/nvidia/Aegis-AI-Content-Safety-Dataset-2.0) to ensure the quality of synthetic data.
Describe how the model works: | A Vision Encoder and a Nemotron 5.5H 12B Language Encoder. It processes multiple input modalities, including text, multiple images, and video. It fuses these inputs and uses its large language model backbone with a 128K context length to perform visual Q&A, summarization, and data extraction.
Name the adversely impacted groups this has been tested to deliver comparable outcomes regardless of: | Not Applicable
Technical Limitations & Mitigation: | The model has a limited maximum resolution determined by a 12-tile layout constraint, where each tile is 512x512 pixels. It also supports a limited number of input images (up to 4) and has a maximum context length of 128K tokens for combined input and output.
Verified to have met prescribed NVIDIA quality standards: | Yes
Performance Metrics: | Accuracy (Visual Question Answering), Latency, Throughput
Potential Known Risks: | The model may produce output that is biased, toxic, or incorrect, and it may amplify those biases and return toxic responses, especially when prompted with toxic prompts. The model may also generate answers that are inaccurate, omit key information, or include irrelevant or redundant text, producing socially unacceptable or undesirable text, even if the prompt itself does not include anything explicitly offensive. While we have taken safety and security into account and are continuously improving, outputs may still contain political content, misleading information, or unwanted bias beyond our control.
Licensing: | Governing Terms: Use of this model is governed by the [NVIDIA Open Model License Agreement](https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license/)
generation_config.json
ADDED
@@ -0,0 +1,11 @@
{
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": [
    2,
    11,
    12
  ],
  "pad_token_id": 0,
  "transformers_version": "4.51.3"
}
hf_quant_config.json
ADDED
@@ -0,0 +1,17 @@
{
  "producer": {
    "name": "modelopt",
    "version": "0.37.0.dev5+g76fb12d47.d20250905"
  },
  "quantization": {
    "quant_algo": "FP8",
    "kv_cache_quant_algo": null,
    "exclude_modules": [
      "model.layers.language_model.lm_head",
      "model.layers.mlp1*",
      "model.layers.*.conv1d*",
      "model.layers.vision_model*",
      "lm_head"
    ]
  }
}
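The exclude_modules entries are shell-style wildcards over module names (the vision tower, projector, Mamba conv1d layers, and lm_head stay unquantized). A minimal sketch of how such patterns can be matched with Python's fnmatch, as an illustration only and not ModelOpt's actual matching code:

# Illustration only: wildcard exclude patterns of this kind matched against module names.
from fnmatch import fnmatch

exclude = [
    "model.layers.language_model.lm_head",
    "model.layers.mlp1*",
    "model.layers.*.conv1d*",
    "model.layers.vision_model*",
    "lm_head",
]

def is_excluded(name: str) -> bool:
    return any(fnmatch(name, pattern) for pattern in exclude)

print(is_excluded("model.layers.mlp1.1"))                                  # True
print(is_excluded("model.layers.language_model.layers.3.mixer.conv1d"))    # True
print(is_excluded("model.layers.language_model.layers.3.mixer.in_proj"))   # False -> quantized to FP8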
image_processing.py
ADDED
@@ -0,0 +1,148 @@
from typing import List, Optional, Union, Any, Dict

from PIL import Image
import torch
from transformers.image_processing_base import BatchFeature
from transformers.image_processing_utils_fast import BaseImageProcessorFast, divide_to_patches
from transformers.image_utils import (make_list_of_images, get_image_size,
                                      get_image_type, ImageInput, ImageType, ChannelDimension)
from transformers.utils import TensorType
import torchvision.transforms as T


class NemotronNanoVLV2ImageProcessor(BaseImageProcessorFast):
    model_input_names = ["pixel_values"]

    def __init__(self, image_size=512, max_num_tiles=12, use_thumbnail=True, norm_mean=None, norm_std=None,
                 do_rescale=True, patch_size=16, downsample_ratio=0.5, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.max_num_tiles = max_num_tiles
        self.use_thumbnail = use_thumbnail
        self.norm_mean = norm_mean
        self.norm_std = norm_std
        self.do_rescale = do_rescale
        self.num_image_token = int((image_size // patch_size) ** 2 * (downsample_ratio ** 2))

    def _process_image(
        self,
        image: ImageInput,
        **kwargs,
    ) -> torch.Tensor:
        image_type = get_image_type(image)
        if image_type == ImageType.PIL:
            if image.mode != 'RGB':
                image = image.convert('RGB')
            image = T.ToTensor()(image)
        return image

    def _preprocess(
        self,
        images: List[torch.Tensor],
        image_size: int = None,
        max_num_tiles: int = None,
        use_thumbnail: bool = None,
        do_rescale: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> List[torch.Tensor]:
        image_size = image_size if image_size is not None else self.image_size
        max_num_tiles = max_num_tiles if max_num_tiles is not None else self.max_num_tiles
        use_thumbnail = use_thumbnail if use_thumbnail is not None else self.use_thumbnail
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale

        images = make_list_of_images(images)

        all_patches = []
        num_patches = []
        for image in images:
            patches = dynamic_preprocess(image, image_size, max_num_tiles, use_thumbnail)
            all_patches.extend(patches)
            num_patches.append(len(patches))

        pixel_values = torch.stack(all_patches, dim=0)
        norm_mean = torch.Tensor(self.norm_mean).view(1, 3, 1, 1)
        norm_std = torch.Tensor(self.norm_std).view(1, 3, 1, 1)
        pixel_values = (pixel_values - norm_mean) / norm_std
        return BatchFeature(data={"pixel_values": pixel_values, "num_patches": num_patches}, tensor_type=return_tensors)


def get_internvl_target_ratios(
    min_num: int,
    max_num: int,
) -> list[tuple[int, int]]:
    target_ratios = {(i, j)
                     for n in range(min_num, max_num + 1)
                     for i in range(1, n + 1)
                     for j in range(1, n + 1) if min_num <= i * j <= max_num}
    return sorted(target_ratios, key=lambda x: x[0] * x[1])


# From https://github.com/OpenGVLab/InternVL/blob/c62fa4f7c850165d7386bdc48ac6bc5a6fab0864/internvl_chat/internvl/train/dataset.py#L685
# Copyright (c) 2023 OpenGVLab.
def find_closest_aspect_ratio(
    aspect_ratio: float,
    target_ratios: list[tuple[int, int]],
    width: int,
    height: int,
    image_size: int,
) -> tuple[int, int]:
    best_ratio_diff = float("inf")
    best_ratio = (1, 1)
    area = width * height
    for ratio in target_ratios:
        target_aspect_ratio = ratio[0] / ratio[1]
        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
        if ratio_diff < best_ratio_diff:
            best_ratio_diff = ratio_diff
            best_ratio = ratio
        elif ratio_diff == best_ratio_diff:
            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
                best_ratio = ratio
    return best_ratio


def calculate_targets(
    orig_width: int,
    orig_height: int,
    target_ratios: list[tuple[int, int]],
    image_size: int,
) -> tuple[int, int, int]:
    aspect_ratio = orig_width / orig_height

    # find the closest aspect ratio to the target
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio,
        target_ratios,
        width=orig_width,
        height=orig_height,
        image_size=image_size,
    )

    # calculate the target width and height
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

    return blocks, target_width, target_height


def dynamic_preprocess(image, image_size=512, max_num_tiles=12, use_thumbnail=True):
    orig_height, orig_width = get_image_size(image, channel_dim=ChannelDimension.FIRST)
    target_ratios = get_internvl_target_ratios(1, max_num_tiles)

    blocks, target_width, target_height = calculate_targets(
        orig_width,
        orig_height,
        target_ratios,
        image_size
    )
    # resize the image
    resized_img = T.Resize((target_height, target_width), interpolation=T.InterpolationMode.BICUBIC)(image)
    patches = divide_to_patches(resized_img, image_size)
    assert len(patches) == blocks
    if use_thumbnail and len(patches) != 1:
        thumbnail_img = T.Resize((image_size, image_size), interpolation=T.InterpolationMode.BICUBIC)(image)
        patches.append(thumbnail_img)

    return patches
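A brief usage sketch of the tiling path above on a single channels-first image tensor (assuming the file is importable as `image_processing`; the values are illustrative):

import torch

from image_processing import dynamic_preprocess  # assumed module name for the file above

image = torch.rand(3, 960, 1280)  # C, H, W floats in [0, 1], as produced by _process_image

patches = dynamic_preprocess(image, image_size=512, max_num_tiles=12, use_thumbnail=True)
# Aspect ratio 1280/960 = 4:3 -> closest tiling grid is 4x3 = 12 tiles of 512x512,
# plus one extra 512x512 thumbnail of the whole image when more than one tile is produced.
print(len(patches), patches[0].shape)  # 13 torch.Size([3, 512, 512])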
llama_nemotron_toolcall_parser_no_streaming.py
ADDED
@@ -0,0 +1,470 @@
# SPDX-License-Identifier: Apache-2.0

import ast
import json
import re
from collections.abc import Sequence
from typing import Union

import partial_json_parser
from partial_json_parser.core.options import Allow

from vllm.entrypoints.openai.protocol import (
    ChatCompletionRequest,
    DeltaFunctionCall, DeltaMessage,
    DeltaToolCall,
    ExtractedToolCallInformation,
    FunctionCall,
    ToolCall,
)
from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import (
    ToolParser,
    ToolParserManager,
)
from vllm.logger import init_logger
from vllm.transformers_utils.tokenizer import AnyTokenizer
from vllm.utils import random_uuid

logger = init_logger(__name__)


@ToolParserManager.register_module("llama_nemotron_xml")
class LlamaNemotronXMLToolParser(ToolParser):

    def __init__(self, tokenizer: AnyTokenizer):
        super().__init__(tokenizer)

        self.current_tool_name_sent: bool = False
        self.prev_tool_call_arr: list[dict] = []
        self.current_tool_id: int = -1  # Potentially for streaming
        self.streamed_args_for_tool: list[str] = []  # Potentially for streaming

        self.tool_call_start_token: str = "<tool_call>"
        self.tool_call_end_token: str = "</tool_call>"

        # Regex to find full <tool_call>...</tool_call> blocks and capture their content
        self.tool_call_block_regex = re.compile(r"<tool_call>(.*?)</tool_call>", re.DOTALL)
        # Regex to find <tool>...</tool> within a tool_call block content
        self.name_regex = re.compile(r"<tool>(.*?)</tool>", re.DOTALL)
        # Regex to find <key>value</key> pairs within the tool_call block content (excluding <tool> tags)
        self.param_regex = re.compile(r"<([^/>\s]+)>(.*?)</\1>", re.DOTALL)

    def extract_tool_calls(
        self,
        model_output: str,
        request: ChatCompletionRequest,
    ) -> ExtractedToolCallInformation:

        tool_call_start_index = model_output.find(self.tool_call_start_token)

        if tool_call_start_index == -1:
            return ExtractedToolCallInformation(
                tools_called=False,
                tool_calls=[],
                content=model_output,
            )

        content = model_output[:tool_call_start_index].strip()
        tool_calls_str_content = model_output[tool_call_start_index:]

        parsed_tool_calls = []

        try:
            # Find all occurrences of <tool_call>...</tool_call>
            xml_tool_call_contents = self.tool_call_block_regex.findall(tool_calls_str_content)

            for tool_content_str in xml_tool_call_contents:
                name_match = self.name_regex.search(tool_content_str)
                if not name_match:
                    logger.warning(f"Could not find tool name in XML block: {tool_content_str}")
                    continue
                tool_name = name_match.group(1).strip()

                parsed_arguments = {}

                # Find all parameter tags in the tool_call content, excluding the <tool> tag
                param_matches = self.param_regex.finditer(tool_content_str)

                for match in param_matches:
                    param_name = match.group(1).strip()
                    param_value_str = match.group(2).strip()

                    # Skip the <tool> tag since it's not a parameter
                    if param_name == "tool":
                        continue

                    target_type = None
                    # Try to get type from request.tools schema
                    if request.tools:
                        for tool_def in request.tools:
                            if tool_def.function.name == tool_name:
                                if tool_def.function.parameters and \
                                        isinstance(tool_def.function.parameters, dict) and \
                                        "properties" in tool_def.function.parameters and \
                                        isinstance(tool_def.function.parameters["properties"], dict) and \
                                        param_name in tool_def.function.parameters["properties"] and \
                                        isinstance(tool_def.function.parameters["properties"][param_name], dict):
                                    target_type = tool_def.function.parameters["properties"][param_name].get("type")
                                break

                    typed_param_value = param_value_str  # Default to string
                    if target_type:
                        try:
                            if target_type == "string":
                                typed_param_value = param_value_str
                            elif target_type == "integer":
                                typed_param_value = int(param_value_str)
                            elif target_type == "number":
                                typed_param_value = float(param_value_str)
                            elif target_type == "boolean":
                                typed_param_value = param_value_str.lower() == 'true'
                            elif target_type in ["object", "array"]:
                                try:
                                    typed_param_value = json.loads(param_value_str)
                                except json.JSONDecodeError:
                                    # Fallback for non-strict JSON like Python dict/list string
                                    typed_param_value = ast.literal_eval(param_value_str)
                            else:  # Unknown type, keep as string
                                typed_param_value = param_value_str
                        except (ValueError, SyntaxError, json.JSONDecodeError) as e:
                            logger.warning(
                                f"Could not convert param '{param_name}' with value '{param_value_str}' "
                                f"to type '{target_type}'. Error: {e}. Using string value."
                            )
                            typed_param_value = param_value_str
                    else:  # No schema type, try ast.literal_eval
                        try:
                            # For values like "true", "123", "['a', 'b']"
                            # ast.literal_eval('some_string_without_quotes') will raise SyntaxError
                            if (param_value_str.startswith("'") and param_value_str.endswith("'")) or \
                                    (param_value_str.startswith('"') and param_value_str.endswith('"')) or \
                                    (param_value_str.startswith('[') and param_value_str.endswith(']')) or \
                                    (param_value_str.startswith('{') and param_value_str.endswith('}')) or \
                                    param_value_str.lower() in ['true', 'false', 'none'] or \
                                    param_value_str.replace('.', '', 1).isdigit() or \
                                    (param_value_str.startswith('-') and param_value_str[1:].replace('.', '', 1).isdigit()):
                                typed_param_value = ast.literal_eval(param_value_str)
                            else:  # It's likely a plain string not meant for ast.literal_eval
                                typed_param_value = param_value_str
                        except (ValueError, SyntaxError):
                            typed_param_value = param_value_str  # Keep as string if ast.literal_eval fails

                    parsed_arguments[param_name] = typed_param_value

                parsed_tool_calls.append(ToolCall(
                    id=f"call_{random_uuid()}",
                    type="function",
                    function=FunctionCall(
                        name=tool_name,
                        arguments=json.dumps(parsed_arguments, ensure_ascii=False),
                    ),
                ))

            return ExtractedToolCallInformation(
                tools_called=len(parsed_tool_calls) > 0,
                tool_calls=parsed_tool_calls,
                content=content if content else None,
            )

        except Exception:
            logger.exception(f"Error in extracting XML tool call from response. Response: {model_output}")
            # Fallback to original model output if parsing fails catastrophically
            return ExtractedToolCallInformation(
                tools_called=False,
                tool_calls=[],
                content=model_output,
            )

    def extract_tool_calls_streaming(
        self,
        previous_text: str,
        current_text: str,
        delta_text: str,
        previous_token_ids: Sequence[int],
        current_token_ids: Sequence[int],
        delta_token_ids: Sequence[int],
        request: ChatCompletionRequest,
    ) -> Union[DeltaMessage, None]:

        raise NotImplementedError("Tool calling is not supported in streaming mode!")


@ToolParserManager.register_module("llama_nemotron_json")
class LlamaNemotronJSONToolParser(ToolParser):

    def __init__(self, tokenizer: AnyTokenizer):
        super().__init__(tokenizer)

        self.current_tool_name_sent: bool = False
        self.prev_tool_call_arr: list[dict] = []
        self.current_tool_id: int = -1
        self.streamed_args_for_tool: list[str] = []

        self.tool_call_start_token: str = "<TOOLCALL>"
        self.tool_call_end_token: str = "</TOOLCALL>"

        self.tool_call_regex = re.compile(r"<TOOLCALL>(.*?)</TOOLCALL>", re.DOTALL)

    def extract_tool_calls(
        self,
        model_output: str,
        request: ChatCompletionRequest,
    ) -> ExtractedToolCallInformation:

        if self.tool_call_start_token not in model_output:
            return ExtractedToolCallInformation(
                tools_called=False,
                tool_calls=[],
                content=model_output,
            )

        else:

            try:
                str_tool_calls = self.tool_call_regex.findall(model_output)[0].strip()
                if not str_tool_calls.startswith("["):
                    str_tool_calls = "[" + str_tool_calls
                if not str_tool_calls.endswith("]"):
                    str_tool_calls = str_tool_calls + "]"
                json_tool_calls = json.loads(str_tool_calls)
                tool_calls = []
                for tool_call in json_tool_calls:
                    try:
                        tool_calls.append(ToolCall(
                            type="function",
                            function=FunctionCall(
                                name=tool_call["name"],
                                arguments=json.dumps(tool_call["arguments"], ensure_ascii=False)
                                if isinstance(tool_call["arguments"], dict) else tool_call["arguments"],
                            ),
                        ))
                    except Exception:
                        continue

                content = model_output[:model_output.rfind(self.tool_call_start_token)]

                return ExtractedToolCallInformation(
                    tools_called=True,
                    tool_calls=tool_calls,
                    content=content if content else None,
                )

            except Exception:
                logger.exception(f"Error in extracting tool call from response. Response: {model_output}")
                return ExtractedToolCallInformation(
                    tools_called=False,
                    tool_calls=[],
                    content=model_output,
                )

    def extract_tool_calls_streaming(
        self,
        previous_text: str,
        current_text: str,
        delta_text: str,
        previous_token_ids: Sequence[int],
        current_token_ids: Sequence[int],
        delta_token_ids: Sequence[int],
        request: ChatCompletionRequest,
    ) -> Union[DeltaMessage, None]:

        raise NotImplementedError("Tool calling is not supported in streaming mode!")


@ToolParserManager.register_module("llama_nemotron_pythonic")
class LlamaNemotronPythonicToolParser(ToolParser):

    def __init__(self, tokenizer: AnyTokenizer):
        super().__init__(tokenizer)

        self.current_tool_name_sent: bool = False
        self.prev_tool_call_arr: list[dict] = []
        self.current_tool_id: int = -1
        self.streamed_args_for_tool: list[str] = []

        self.tool_call_start_token: str = "<TOOLCALL>"
        self.tool_call_end_token: str = "</TOOLCALL>"

        self.tool_call_regex = re.compile(r"<TOOLCALL>(.*?)</TOOLCALL>", re.DOTALL)
        # Regex to parse pythonic function calls: function_name(arg1="value1", arg2=123, arg3=True)
        self.function_call_regex = re.compile(r"(\w+)\((.*?)\)$", re.DOTALL)

    def parse_function_arguments(self, args_str: str) -> dict:
        """Parse a pythonic function-arguments string into a dictionary."""
        if not args_str.strip():
            return {}

        # Use ast.parse to safely parse the function call arguments.
        # We construct a temporary function call and parse it.
        try:
            # Create a dummy function call to parse arguments
            dummy_code = f"dummy_func({args_str})"
            parsed = ast.parse(dummy_code, mode='eval')

            # Extract arguments from the AST
            call_node = parsed.body
            if not isinstance(call_node, ast.Call):
                return {}

            arguments = {}

            # Handle keyword arguments
            for keyword in call_node.keywords:
                if keyword.arg is None:  # **kwargs
                    continue

                # Convert AST value to Python value
                try:
                    value = ast.literal_eval(keyword.value)
                    arguments[keyword.arg] = value
                except (ValueError, TypeError):
                    # If literal_eval fails, try to get the raw value
                    if isinstance(keyword.value, ast.Name):
                        arguments[keyword.arg] = keyword.value.id
                    elif isinstance(keyword.value, ast.Constant):
                        arguments[keyword.arg] = keyword.value.value
                    else:
                        # Fallback: convert to string
                        arguments[keyword.arg] = ast.unparse(keyword.value)

            # Handle positional arguments (less common in tool calls but supported)
            for i, arg in enumerate(call_node.args):
                try:
                    value = ast.literal_eval(arg)
                    arguments[f"arg_{i}"] = value
                except (ValueError, TypeError):
                    if isinstance(arg, ast.Name):
                        arguments[f"arg_{i}"] = arg.id
                    elif isinstance(arg, ast.Constant):
                        arguments[f"arg_{i}"] = arg.value
                    else:
                        arguments[f"arg_{i}"] = ast.unparse(arg)

            return arguments

        except (SyntaxError, ValueError) as e:
            logger.warning(f"Failed to parse function arguments '{args_str}': {e}")
            return {}

    def extract_tool_calls(
        self,
        model_output: str,
        request: ChatCompletionRequest,
    ) -> ExtractedToolCallInformation:

        if self.tool_call_start_token not in model_output:
            return ExtractedToolCallInformation(
                tools_called=False,
                tool_calls=[],
                content=model_output,
            )

        tool_call_start_index = model_output.find(self.tool_call_start_token)
        content = model_output[:tool_call_start_index].strip()

        try:
            # Extract content between <TOOLCALL> tags
            tool_call_matches = self.tool_call_regex.findall(model_output)
            if not tool_call_matches:
                return ExtractedToolCallInformation(
                    tools_called=False,
                    tool_calls=[],
                    content=model_output,
                )

            tool_calls_content = tool_call_matches[0].strip()

            # Split by lines to get individual function calls
            function_lines = [line.strip() for line in tool_calls_content.split('\n') if line.strip()]

            parsed_tool_calls = []

            for func_line in function_lines:
                # Parse each function call
                match = self.function_call_regex.match(func_line)
                if not match:
                    logger.warning(f"Could not parse function call: {func_line}")
                    continue

                function_name = match.group(1)
                args_str = match.group(2)

                # Parse arguments
                parsed_arguments = self.parse_function_arguments(args_str)

                # Apply type conversion based on schema if available
                if request.tools:
                    for tool_def in request.tools:
                        if tool_def.function.name == function_name:
                            schema_properties = {}
                            if (tool_def.function.parameters and
                                    isinstance(tool_def.function.parameters, dict) and
                                    "properties" in tool_def.function.parameters and
                                    isinstance(tool_def.function.parameters["properties"], dict)):
                                schema_properties = tool_def.function.parameters["properties"]

                            # Convert arguments based on schema types
                            for arg_name, arg_value in parsed_arguments.items():
                                if arg_name in schema_properties:
                                    param_info = schema_properties[arg_name]
                                    target_type = param_info.get("type")

                                    try:
                                        if target_type == "string" and not isinstance(arg_value, str):
                                            parsed_arguments[arg_name] = str(arg_value)
                                        elif target_type == "integer" and not isinstance(arg_value, int):
                                            parsed_arguments[arg_name] = int(arg_value)
                                        elif target_type == "number" and not isinstance(arg_value, (int, float)):
                                            parsed_arguments[arg_name] = float(arg_value)
                                        elif target_type == "boolean" and not isinstance(arg_value, bool):
                                            if isinstance(arg_value, str):
                                                parsed_arguments[arg_name] = arg_value.lower() in ['true', '1', 'yes']
                                            else:
                                                parsed_arguments[arg_name] = bool(arg_value)
                                        elif target_type in ["object", "array"]:
                                            if isinstance(arg_value, str):
                                                try:
                                                    parsed_arguments[arg_name] = json.loads(arg_value)
                                                except json.JSONDecodeError:
                                                    # Keep as string if JSON parsing fails
                                                    pass
                                    except (ValueError, TypeError) as e:
                                        logger.warning(f"Type conversion failed for {arg_name}: {e}")
                                        # Keep original value if conversion fails
                            break

                parsed_tool_calls.append(ToolCall(
                    id=f"call_{random_uuid()}",
                    type="function",
                    function=FunctionCall(
                        name=function_name,
                        arguments=json.dumps(parsed_arguments, ensure_ascii=False),
                    ),
                ))

            return ExtractedToolCallInformation(
                tools_called=len(parsed_tool_calls) > 0,
                tool_calls=parsed_tool_calls,
                content=content if content else None,
            )

        except Exception:
            logger.exception(f"Error in extracting pythonic tool call from response. Response: {model_output}")
            return ExtractedToolCallInformation(
                tools_called=False,
                tool_calls=[],
                content=model_output,
            )

    def extract_tool_calls_streaming(
        self,
        previous_text: str,
        current_text: str,
        delta_text: str,
        previous_token_ids: Sequence[int],
        current_token_ids: Sequence[int],
        delta_token_ids: Sequence[int],
        request: ChatCompletionRequest,
    ) -> Union[DeltaMessage, None]:

        raise NotImplementedError("Tool calling is not supported in streaming mode!")
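To make the expected XML tool-call format concrete, here is a standalone sketch that mirrors the regex extraction of LlamaNemotronXMLToolParser without the vLLM plumbing; the tool name and parameters are hypothetical examples.

# Illustration only: the <tool_call><tool>name</tool><param>value</param></tool_call> format.
import json
import re

model_output = (
    "Let me check the weather."
    "<tool_call><tool>get_weather</tool><city>Paris</city><unit>celsius</unit></tool_call>"
)

block_re = re.compile(r"<tool_call>(.*?)</tool_call>", re.DOTALL)
name_re = re.compile(r"<tool>(.*?)</tool>", re.DOTALL)
param_re = re.compile(r"<([^/>\s]+)>(.*?)</\1>", re.DOTALL)

for block in block_re.findall(model_output):
    name = name_re.search(block).group(1)
    args = {k: v for k, v in param_re.findall(block) if k != "tool"}
    print(name, json.dumps(args))  # get_weather {"city": "Paris", "unit": "celsius"}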
model-00001-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ae4461eba110cf3251a45c60a3a71e27c587f7df8d2bfccecd8c4aeb63298914
size 4999458416
model-00002-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:20552cb6e7fb2d94e7976f2b60aaba540695498cd3582aba31a82569ba3a8b0f
size 4990786200
model-00003-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bc5884919e5d9af689f2908cd97963f8f8ddbbbfbe05ec0796fcd344210d3bdd
size 4988690304
model-00004-of-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6e2c02d5ea330834358c3031121cad2d3a9b5071fb589d8b85b88e06cd9658a1
size 419430616
model.safetensors.index.json
ADDED
The diff for this file is too large to render. See raw diff.
modeling.py
ADDED
@@ -0,0 +1,287 @@
import os
import warnings
from typing import List, Optional, Tuple, Union

import torch
import transformers
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers import AutoModel, AutoModelForCausalLM, GenerationConfig
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

from .configuration import NemotronH_Nano_VL_V2_Config
from .modeling_nemotron_h import NemotronHForCausalLM
from .evs import EfficientVideoSampling

logger = logging.get_logger(__name__)


"""
The following code is adapted from the
https://huggingface.co/OpenGVLab/InternVL2-Llama3-76B/blob/main/modeling_internvl_chat.py repository.

The chat function is adapted to handle the NVLM 1-D tile-tagging design for dynamic high-resolution images.
"""


class SquaredReLU(nn.Module):
    def forward(self, x):
        return torch.pow(torch.nn.functional.relu(x), 2)


class RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-5):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.eps = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
        return (self.weight.to(torch.float32) * hidden_states).to(input_dtype)


def version_cmp(v1, v2, op='eq'):
    import operator

    from packaging import version
    op_func = getattr(operator, op)
    return op_func(version.parse(v1), version.parse(v2))


class NemotronH_Nano_VL_V2(PreTrainedModel):
    config_class = NemotronH_Nano_VL_V2_Config
    main_input_name = 'pixel_values'
    _supports_flash_attn_2 = True
    _no_split_modules = ['NemotronHBlock']

    def __init__(self, config: NemotronH_Nano_VL_V2_Config):
        super().__init__(config)

        assert version_cmp(transformers.__version__, '4.36.2', 'ge')
        image_size = config.force_image_size
        patch_size = config.patch_size
        self.patch_size = patch_size
        self.template = config.template
        self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
        self.downsample_ratio = config.downsample_ratio
        self.ps_version = config.ps_version
        self.image_tag_type = config.image_tag_type
        self.img_context_token_id = config.img_context_token_id
        self.video_context_token_id = config.video_context_token_id

        logger.info(f'num_image_token: {self.num_image_token}')
        logger.info(f'ps_version: {self.ps_version}')

        self.language_model = AutoModelForCausalLM.from_config(config.llm_config, trust_remote_code=True)
        self.vision_model = AutoModel.from_config(config.vision_config, trust_remote_code=True)
        self.vision_model.model._initialize_weights = self.vision_model.model._init_weights  # WAR for transformers issue 38358
        self.vision_model.radio_model.make_preprocessor_external()
        self.vision_model = self.vision_model.to(self.language_model.config.torch_dtype)

        self.drop_vision_class_token = True

        # Construct the vision projection.
        # Default
        vit_hidden_size = config.vit_hidden_size
        vision_projection_hidden_size = config.projector_hidden_size
        llm_hidden_size = config.llm_config.hidden_size

        self.video_pruning_rate = config.video_pruning_rate

        self.mlp1 = nn.Sequential(
            RMSNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, eps=1e-5),
            nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, vision_projection_hidden_size, bias=False),
            SquaredReLU(),
            nn.Linear(vision_projection_hidden_size, llm_hidden_size, bias=False)
        )
        self.mlp1 = self.mlp1.to(self.language_model.config.torch_dtype)

    def forward(
        self,
        pixel_values: torch.FloatTensor,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        image_flags: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        labels: Optional[torch.LongTensor] = None,
        inputs_embeds=None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if inputs_embeds is None:
            inputs_embeds = self.language_model.get_input_embeddings()(input_ids)

        image_flags = image_flags.squeeze(-1)

        B, N, C = inputs_embeds.shape
        inputs_embeds = inputs_embeds.reshape(B * N, C)

        input_ids = input_ids.reshape(B * N)
        selected = (input_ids == self.img_context_token_id)

        vit_batch_size = pixel_values.shape[0]
        vit_embeds = self.extract_feature(pixel_values)

        del pixel_values

        if torch.distributed.get_rank() == 0:
            print(f'dynamic ViT batch size: {vit_batch_size}, images per sample: {vit_batch_size / B}, dynamic token length: {N}')

        vit_embeds = vit_embeds[image_flags == 1]
        try:
            inputs_embeds[selected] = inputs_embeds[selected] * 0.0 + vit_embeds.reshape(-1, C)
        except Exception as e:
            vit_embeds = vit_embeds.reshape(-1, C)
            print(f'warning: {e}, inputs_embeds[selected].shape={inputs_embeds[selected].shape}, '
                  f'vit_embeds.shape={vit_embeds.shape}')
            n_token = selected.sum()
            inputs_embeds[selected] = inputs_embeds[selected] * 0.0 + vit_embeds[:n_token]

        del vit_embeds

        inputs_embeds = inputs_embeds.reshape(B, N, C)

        outputs = self.language_model(
|
| 155 |
+
inputs_embeds=inputs_embeds,
|
| 156 |
+
attention_mask=attention_mask,
|
| 157 |
+
position_ids=position_ids,
|
| 158 |
+
past_key_values=past_key_values,
|
| 159 |
+
use_cache=use_cache,
|
| 160 |
+
output_attentions=output_attentions,
|
| 161 |
+
output_hidden_states=output_hidden_states,
|
| 162 |
+
return_dict=return_dict,
|
| 163 |
+
)
|
| 164 |
+
logits = outputs.logits
|
| 165 |
+
|
| 166 |
+
loss = None
|
| 167 |
+
if labels is not None:
|
| 168 |
+
# Shift so that tokens < n predict n
|
| 169 |
+
shift_logits = logits[..., :-1, :].contiguous()
|
| 170 |
+
shift_labels = labels[..., 1:].contiguous()
|
| 171 |
+
# Flatten the tokens
|
| 172 |
+
loss_fct = CrossEntropyLoss()
|
| 173 |
+
shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
|
| 174 |
+
shift_labels = shift_labels.view(-1)
|
| 175 |
+
# Enable model parallelism
|
| 176 |
+
shift_labels = shift_labels.to(shift_logits.device)
|
| 177 |
+
loss = loss_fct(shift_logits, shift_labels)
|
| 178 |
+
|
| 179 |
+
if not return_dict:
|
| 180 |
+
output = (logits,) + outputs[1:]
|
| 181 |
+
return (loss,) + output if loss is not None else output
|
| 182 |
+
|
| 183 |
+
return CausalLMOutputWithPast(
|
| 184 |
+
loss=loss,
|
| 185 |
+
logits=logits,
|
| 186 |
+
past_key_values=outputs.past_key_values,
|
| 187 |
+
hidden_states=outputs.hidden_states,
|
| 188 |
+
attentions=outputs.attentions,
|
| 189 |
+
)
|
| 190 |
+
|
| 191 |
+
def pixel_shuffle(self, x, scale_factor=0.5):
|
| 192 |
+
n, w, h, c = x.size()
|
| 193 |
+
# N, W, H, C --> N, W, H * scale, C // scale
|
| 194 |
+
x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
|
| 195 |
+
# N, W, H * scale, C // scale --> N, H * scale, W, C // scale
|
| 196 |
+
x = x.permute(0, 2, 1, 3).contiguous()
|
| 197 |
+
# N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
|
| 198 |
+
x = x.view(n, int(h * scale_factor), int(w * scale_factor),
|
| 199 |
+
int(c / (scale_factor * scale_factor)))
|
| 200 |
+
if self.ps_version == 'v1':
|
| 201 |
+
warnings.warn("In ps_version 'v1', the height and width have not been swapped back, "
|
| 202 |
+
'which results in a transposed image.')
|
| 203 |
+
else:
|
| 204 |
+
x = x.permute(0, 2, 1, 3).contiguous()
|
| 205 |
+
return x
|
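# Added shape walk-through (illustrative only, assuming scale_factor=0.5 and a 32x32 token grid):
#   x: (N, 32, 32, C)
#   view    -> (N, 32, 16, 2C)   # h * 0.5, c / 0.5
#   permute -> (N, 16, 32, 2C)
#   view    -> (N, 16, 16, 4C)   # w * 0.5, c / 0.25
#   permute -> (N, 16, 16, 4C)   # ps_version != 'v1' swaps H and W back
# i.e. 1024 ViT patch tokens per tile become 256 tokens with 4x the channel dimension.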
| 206 |
+
|
| 207 |
+
def extract_feature(self, pixel_values):
|
| 208 |
+
vit_embeds = self.vision_model(pixel_values).features
|
| 209 |
+
vit_embeds = vit_embeds.to(dtype=torch.bfloat16)
|
| 210 |
+
h = w = int(vit_embeds.shape[1] ** 0.5)
|
| 211 |
+
vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
|
| 212 |
+
vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
|
| 213 |
+
vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
|
| 214 |
+
vit_embeds = self.mlp1(vit_embeds)
|
| 215 |
+
return vit_embeds
|
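# Added summary of the vision path (illustrative shapes, not part of the original file):
#   pixel_values (num_tiles, 3, H, W)
#     -> vision_model   (num_tiles, (H/patch) * (W/patch), vit_hidden_size)
#     -> pixel_shuffle  (num_tiles, num_image_token, vit_hidden_size * 4)   # with downsample_ratio=0.5
#     -> mlp1           (num_tiles, num_image_token, llm_hidden_size)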
| 216 |
+
|
| 217 |
+
@torch.no_grad()
|
| 218 |
+
def generate(
|
| 219 |
+
self,
|
| 220 |
+
pixel_values: Optional[torch.FloatTensor] = None,
|
| 221 |
+
pixel_values_videos: Optional[torch.FloatTensor] = None,
|
| 222 |
+
input_ids: Optional[torch.FloatTensor] = None,
|
| 223 |
+
attention_mask: Optional[torch.LongTensor] = None,
|
| 224 |
+
generation_config: Optional[GenerationConfig] = None,
|
| 225 |
+
output_hidden_states: Optional[bool] = None,
|
| 226 |
+
return_dict: Optional[bool] = None,
|
| 227 |
+
**generate_kwargs,
|
| 228 |
+
) -> torch.LongTensor:
|
| 229 |
+
assert self.img_context_token_id is not None
|
| 230 |
+
if pixel_values is not None or pixel_values_videos is not None:
|
| 231 |
+
image_vit_embeds, video_vit_embeds = None, None
|
| 232 |
+
if pixel_values is not None:
|
| 233 |
+
pixel_values = pixel_values.to(dtype=self.vision_model.config.torch_dtype)
|
| 234 |
+
image_vit_embeds = self.extract_feature(pixel_values)
|
| 235 |
+
if pixel_values_videos is not None:
|
| 236 |
+
pixel_values_videos = pixel_values_videos.to(dtype=self.vision_model.config.torch_dtype)
|
| 237 |
+
video_vit_embeds = self.extract_feature(pixel_values_videos)
|
| 238 |
+
inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
|
| 239 |
+
B, N, C = inputs_embeds.shape
|
| 240 |
+
inputs_embeds = inputs_embeds.reshape(B * N, C)
|
| 241 |
+
input_ids_copy = input_ids.reshape(B * N)
|
| 242 |
+
if image_vit_embeds is not None:
|
| 243 |
+
image_mask = (input_ids_copy == self.img_context_token_id)
|
| 244 |
+
assert image_mask.sum() != 0
|
| 245 |
+
inputs_embeds[image_mask] = image_vit_embeds.reshape(-1, C).to(inputs_embeds.device, inputs_embeds.dtype)
|
| 246 |
+
if video_vit_embeds is not None:
|
| 247 |
+
if B > 1:
|
| 248 |
+
raise NotImplementedError("Video is not supported for batch size > 1")
|
| 249 |
+
video_mask = (input_ids_copy == self.video_context_token_id)
|
| 250 |
+
assert video_mask.sum() != 0
|
| 251 |
+
inputs_embeds[video_mask] = video_vit_embeds.reshape(-1, C).to(inputs_embeds.device, inputs_embeds.dtype)
|
| 252 |
+
if video_vit_embeds is not None and self.video_pruning_rate > 0: # EVS
|
| 253 |
+
h = w = int(video_vit_embeds.shape[1] ** 0.5) # assumption here (and everywhere else) is that shape is square
|
| 254 |
+
evs_mask = EfficientVideoSampling.compute_retention_mask(
|
| 255 |
+
video_embeds=video_vit_embeds,
|
| 256 |
+
thw=(video_vit_embeds.shape[0], h, w),
|
| 257 |
+
spatial_merge_size=1, # we already work on vision embeddings, so no downsampling to follow
|
| 258 |
+
q=self.video_pruning_rate,
|
| 259 |
+
)
|
| 260 |
+
print(f"pruning rate: {self.video_pruning_rate}, EVS mask: {evs_mask.sum().item()} tokens retained out of {evs_mask.numel()} total video tokens ({evs_mask.sum().item() / evs_mask.numel() * 100:.2f}%)")
|
| 261 |
+
|
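# Added note (illustrative numbers, not from the file): with q = video_pruning_rate = 0.75 and,
# say, 16 frames of 256 tokens each (4096 video tokens), roughly 1024 tokens are retained; the
# retention mask below drops the pruned positions from inputs_embeds, attention_mask and input_ids
# consistently, so the language model never sees the pruned video tokens.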
| 262 |
+
retention_mask = torch.ones_like(input_ids_copy, dtype=torch.bool)
|
| 263 |
+
retention_mask[video_mask] = evs_mask.view(-1)
|
| 264 |
+
inputs_embeds = inputs_embeds[retention_mask].unsqueeze(0) # adding batch=1
|
| 265 |
+
if attention_mask is not None:
|
| 266 |
+
attention_mask = attention_mask[:, retention_mask].contiguous()
|
| 267 |
+
if input_ids is not None:
|
| 268 |
+
input_ids = input_ids[:, retention_mask].contiguous()
|
| 269 |
+
else:
|
| 270 |
+
inputs_embeds = inputs_embeds.reshape(B, N, C)
|
| 271 |
+
else:
|
| 272 |
+
inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
|
| 273 |
+
# print(f"DEBUG: input_ids shape: {input_ids.shape}")
|
| 274 |
+
# print(f"DEBUG: input text: {self._tokenizer.decode(input_ids[0])}")
|
| 275 |
+
outputs = self.language_model.generate(
|
| 276 |
+
input_ids=input_ids,
|
| 277 |
+
inputs_embeds=inputs_embeds,
|
| 278 |
+
attention_mask=attention_mask,
|
| 279 |
+
generation_config=generation_config,
|
| 280 |
+
output_hidden_states=output_hidden_states,
|
| 281 |
+
use_cache=True,
|
| 282 |
+
# return_dict_in_generate=True,
|
| 283 |
+
# output_scores=True,
|
| 284 |
+
**generate_kwargs,
|
| 285 |
+
)
|
| 286 |
+
|
| 287 |
+
return outputs
|
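A minimal usage sketch for the model class above (added for illustration only; the repository id, chat prompt, and processor behaviour are assumptions, not taken from this commit):

import torch
from PIL import Image
from transformers import AutoModel, AutoProcessor

repo_id = "nvidia/NemotronH-Nano-VL-V2"          # hypothetical id; substitute the actual repository name
processor = AutoProcessor.from_pretrained(repo_id, trust_remote_code=True)   # assumed to be served by processing.py
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True, torch_dtype=torch.bfloat16).eval().cuda()

image = Image.open("example.jpg")                # any RGB image
inputs = processor(text="<image>\nDescribe this image.", images=[image], return_tensors="pt").to("cuda")
output_ids = model.generate(
    pixel_values=inputs["pixel_values"],
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    max_new_tokens=128,                          # forwarded to language_model.generate via **generate_kwargs
)
print(processor.tokenizer.decode(output_ids[0], skip_special_tokens=True))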
modeling_nemotron_h.py
ADDED
|
@@ -0,0 +1,1636 @@
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2024 HuggingFace Inc. team.
|
| 3 |
+
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
"""PyTorch NemotronH model."""
|
| 17 |
+
|
| 18 |
+
import math
|
| 19 |
+
from dataclasses import dataclass
|
| 20 |
+
from typing import Any, Dict, Optional, Tuple, Union
|
| 21 |
+
|
| 22 |
+
import torch
|
| 23 |
+
import torch.utils.checkpoint
|
| 24 |
+
from torch import nn
|
| 25 |
+
from torch.nn import CrossEntropyLoss
|
| 26 |
+
|
| 27 |
+
from transformers.activations import ACT2FN
|
| 28 |
+
from transformers.cache_utils import DynamicCache # we need __iter__ and __len__ of pkv
|
| 29 |
+
from transformers.generation import GenerationMixin
|
| 30 |
+
from transformers.modeling_attn_mask_utils import (
|
| 31 |
+
AttentionMaskConverter,
|
| 32 |
+
)
|
| 33 |
+
from transformers.modeling_utils import PreTrainedModel
|
| 34 |
+
from transformers.utils import (
|
| 35 |
+
ModelOutput,
|
| 36 |
+
add_code_sample_docstrings,
|
| 37 |
+
add_start_docstrings,
|
| 38 |
+
add_start_docstrings_to_model_forward,
|
| 39 |
+
logging,
|
| 40 |
+
)
|
| 41 |
+
from transformers.utils.import_utils import (
|
| 42 |
+
is_causal_conv1d_available,
|
| 43 |
+
is_flash_attn_2_available,
|
| 44 |
+
is_flash_attn_greater_or_equal_2_10,
|
| 45 |
+
is_mamba_2_ssm_available,
|
| 46 |
+
)
|
| 47 |
+
from .configuration_nemotron_h import NemotronHConfig
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
logger = logging.get_logger(__name__)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
# Copied from transformers.models.mamba2.modeling_mamba2 with MAMBA2->NEMOTRONH, Mamba2->NemotronH
|
| 54 |
+
# For Mamba2 components Mamba2->NemotronHMamba2
|
| 55 |
+
if is_mamba_2_ssm_available():
|
| 56 |
+
from mamba_ssm.ops.triton.selective_state_update import selective_state_update
|
| 57 |
+
from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined
|
| 58 |
+
else:
|
| 59 |
+
mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined, selective_state_update = None, None, None
|
| 60 |
+
|
| 61 |
+
try:
|
| 62 |
+
#from mamba_ssm.ops.triton.layernorm_gated import RMSNorm as RMSNormGated
|
| 63 |
+
from mamba_ssm.ops.triton.layernorm_gated import rmsnorm_fn
|
| 64 |
+
except ImportError:
|
| 65 |
+
raise ImportError("mamba-ssm is required by the Mamba model but cannot be imported")
|
| 66 |
+
|
| 67 |
+
if is_causal_conv1d_available():
|
| 68 |
+
from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
|
| 69 |
+
else:
|
| 70 |
+
causal_conv1d_update, causal_conv1d_fn = None, None
|
| 71 |
+
|
| 72 |
+
if is_flash_attn_2_available():
|
| 73 |
+
from transformers.modeling_flash_attention_utils import _flash_attention_forward
|
| 74 |
+
|
| 75 |
+
is_fast_path_available = all(
|
| 76 |
+
(
|
| 77 |
+
selective_state_update,
|
| 78 |
+
mamba_chunk_scan_combined,
|
| 79 |
+
mamba_split_conv1d_scan_combined,
|
| 80 |
+
causal_conv1d_fn,
|
| 81 |
+
causal_conv1d_update,
|
| 82 |
+
)
|
| 83 |
+
)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
_CHECKPOINT_FOR_DOC = "nvidia/Nemotron-H-56B-Base-8K"
|
| 87 |
+
_CONFIG_FOR_DOC = "NemotronHConfig"
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
# Helper methods for segment sum computation
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def pad_tensor_by_size(input_tensor: torch.Tensor, pad_size: int):
|
| 94 |
+
"""
|
| 95 |
+
Pads the input tensor with `pad_size` on the seq_len dim (dim=1)
|
| 96 |
+
|
| 97 |
+
Assumes that we only have tensors of either size 4 or 3
|
| 98 |
+
"""
|
| 99 |
+
pad_shape = (0, 0, 0, 0, 0, pad_size, 0, 0) if len(input_tensor.shape) == 4 else (0, 0, 0, pad_size, 0, 0)
|
| 100 |
+
|
| 101 |
+
return torch.nn.functional.pad(input_tensor, pad_shape, mode="constant", value=0)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def reshape_into_chunks(input_tensor, pad_size, chunk_size):
|
| 105 |
+
"""
|
| 106 |
+
Padding input_tensor with `pad_size` on the seq_len dim (dim=1) and
|
| 107 |
+
simultaneously splitting it into chunk sequences.
|
| 108 |
+
|
| 109 |
+
Assumes that we only have tensors of either size 4 or 3
|
| 110 |
+
"""
|
| 111 |
+
# [bsz, seq_len, ...] -> [bsz, seq_len multiple of chunk_size, ...]
|
| 112 |
+
input_tensor = pad_tensor_by_size(input_tensor, pad_size)
|
| 113 |
+
|
| 114 |
+
if len(input_tensor.shape) == 3:
|
| 115 |
+
# [bsz, seq_len multiple of chunk_size, num_heads] -> [bsz, -1, chunk_size, num_heads]
|
| 116 |
+
return input_tensor.reshape(input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2])
|
| 117 |
+
else:
|
| 118 |
+
# [bsz, seq_len multiple of chunk_size, num_heads, head_dim or state_size] -> [bsz, -1, chunk_size, num_heads, head_dim or state_size]
|
| 119 |
+
return input_tensor.reshape(
|
| 120 |
+
input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2], input_tensor.shape[3]
|
| 121 |
+
)
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def segment_sum(input_tensor):
|
| 125 |
+
"""
|
| 126 |
+
More stable segment sum calculation. Uses cumulative sums and masking instead of direct subtractions.
|
| 127 |
+
"""
|
| 128 |
+
chunk_size = input_tensor.size(-1)
|
| 129 |
+
# 1. expand input tensor to have an additional dimension and repeat along that dimension
|
| 130 |
+
# [..., chunk_size] -> [..., chunk_size, chunk_size]
|
| 131 |
+
input_tensor = input_tensor[..., None].expand(*input_tensor.size(), chunk_size)
|
| 132 |
+
# 2. create a strictly lower triangular mask (diagonal=-1) so that elements on and above the diagonal are zeroed out
|
| 133 |
+
mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=-1)
|
| 134 |
+
input_tensor = input_tensor.masked_fill(~mask, 0)
|
| 135 |
+
# 3. compute actual cumsum
|
| 136 |
+
tensor_segsum = torch.cumsum(input_tensor, dim=-2)
|
| 137 |
+
|
| 138 |
+
# 4. apply mask to keep only the lower triangular part of the cumulative sum result (incl diagonal this time)
|
| 139 |
+
mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=0)
|
| 140 |
+
tensor_segsum = tensor_segsum.masked_fill(~mask, -torch.inf)
|
| 141 |
+
return tensor_segsum
|
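# Added illustration (not part of the original file): for a 1-D chunk A = [a0, a1, a2] the result O is
#   O[i, j] = a_{j+1} + ... + a_i   for j < i
#   O[i, i] = 0
#   O[i, j] = -inf                  for j > i
# so torch.exp(segment_sum(A)) is the lower-triangular decay matrix used in the chunked scan below.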
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def apply_mask_to_padding_states(hidden_states, attention_mask):
|
| 145 |
+
"""
|
| 146 |
+
Tunes out the hidden states for padding tokens, see https://github.com/state-spaces/mamba/issues/66
|
| 147 |
+
"""
|
| 148 |
+
if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
|
| 149 |
+
dtype = hidden_states.dtype
|
| 150 |
+
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
|
| 151 |
+
|
| 152 |
+
return hidden_states
|
| 153 |
+
|
| 154 |
+
# Copied from https://github.com/huggingface/transformers/blob/main/src/transformers/models/jamba/modeling_jamba.py
|
| 155 |
+
class HybridMambaAttentionDynamicCache(DynamicCache):
|
| 156 |
+
"""
|
| 157 |
+
A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
|
| 158 |
+
(which has a constant shape regardless of seq_len).
|
| 159 |
+
|
| 160 |
+
This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states`
|
| 161 |
+
and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors; the expected shape of each tensor depends on the layer type:
|
| 162 |
+
For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`,
|
| 163 |
+
while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors).
|
| 164 |
+
For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors),
|
| 165 |
+
while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`,
|
| 166 |
+
and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`.
|
| 167 |
+
"""
|
| 168 |
+
|
| 169 |
+
def __init__(self, config, batch_size, dtype=torch.float16, device=None):
|
| 170 |
+
super().__init__()
|
| 171 |
+
self.dtype = dtype
|
| 172 |
+
self.hybrid_override_pattern = config.hybrid_override_pattern
|
| 173 |
+
self.has_previous_state = False # only used by mamba
|
| 174 |
+
#intermediate_size = config.expand * config.hidden_size
|
| 175 |
+
intermediate_size = config.mamba_num_heads * config.mamba_head_dim
|
| 176 |
+
ssm_state_size = config.ssm_state_size
|
| 177 |
+
conv_kernel_size = config.conv_kernel
|
| 178 |
+
self.conv_states = []
|
| 179 |
+
self.ssm_states = []
|
| 180 |
+
self.transformer_layers = []
|
| 181 |
+
for i in range(config.num_hidden_layers):
|
| 182 |
+
if self.hybrid_override_pattern[i] == "M":
|
| 183 |
+
# Mamba layer
|
| 184 |
+
self.conv_states += [
|
| 185 |
+
torch.zeros(batch_size, intermediate_size, conv_kernel_size, device=device, dtype=dtype)
|
| 186 |
+
]
|
| 187 |
+
self.ssm_states += [
|
| 188 |
+
torch.zeros(batch_size, intermediate_size, ssm_state_size, device=device, dtype=torch.float32)
|
| 189 |
+
]
|
| 190 |
+
else:
|
| 191 |
+
# Attention or MLP layer
|
| 192 |
+
self.conv_states += [torch.tensor([[]] * batch_size, device=device)]
|
| 193 |
+
self.ssm_states += [torch.tensor([[]] * batch_size, device=device)]
|
| 194 |
+
self.transformer_layers.append(i)
|
| 195 |
+
|
| 196 |
+
self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
|
| 197 |
+
self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
|
| 198 |
+
|
| 199 |
+
def update(
|
| 200 |
+
self,
|
| 201 |
+
key_states: torch.Tensor,
|
| 202 |
+
value_states: torch.Tensor,
|
| 203 |
+
layer_idx: int,
|
| 204 |
+
cache_kwargs: Optional[Dict[str, Any]] = None,
|
| 205 |
+
) -> Tuple[torch.Tensor, torch.Tensor]:
|
| 206 |
+
# Update the cache
|
| 207 |
+
if self.key_cache[layer_idx].shape[-1] == 0:
|
| 208 |
+
self.key_cache[layer_idx] = key_states
|
| 209 |
+
self.value_cache[layer_idx] = value_states
|
| 210 |
+
else:
|
| 211 |
+
self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=2)
|
| 212 |
+
self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=2)
|
| 213 |
+
|
| 214 |
+
return self.key_cache[layer_idx], self.value_cache[layer_idx]
|
| 215 |
+
|
| 216 |
+
def reorder_cache(self, beam_idx: torch.LongTensor):
|
| 217 |
+
"""Reorders the cache for beam search, given the selected beam indices."""
|
| 218 |
+
for layer_idx in range(len(self.key_cache)):
|
| 219 |
+
device = self.key_cache[layer_idx].device
|
| 220 |
+
self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
|
| 221 |
+
device = self.value_cache[layer_idx].device
|
| 222 |
+
self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
|
| 223 |
+
|
| 224 |
+
device = self.conv_states[layer_idx].device
|
| 225 |
+
self.conv_states[layer_idx] = self.conv_states[layer_idx].index_select(0, beam_idx.to(device))
|
| 226 |
+
device = self.ssm_states[layer_idx].device
|
| 227 |
+
self.ssm_states[layer_idx] = self.ssm_states[layer_idx].index_select(0, beam_idx.to(device))
|
| 228 |
+
|
| 229 |
+
def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
|
| 230 |
+
"""Returns the sequence length of the cached states. A layer index can be optionally passed."""
|
| 231 |
+
# take any layer that contains cache and not empty tensor
|
| 232 |
+
layer_idx = self.transformer_layers[0] if layer_idx not in self.transformer_layers else layer_idx
|
| 233 |
+
if len(self.key_cache) <= layer_idx:
|
| 234 |
+
return 0
|
| 235 |
+
return self.key_cache[layer_idx].shape[-2]
|
| 236 |
+
|
| 237 |
+
def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]:
|
| 238 |
+
raise NotImplementedError("HybridMambaAttentionDynamicCache does not have a legacy cache equivalent.")
|
| 239 |
+
|
| 240 |
+
@classmethod
|
| 241 |
+
def from_legacy_cache(cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None) -> "DynamicCache":
|
| 242 |
+
raise NotImplementedError("HybridMambaAttentionDynamicCache does not have a legacy cache equivalent.")
|
| 243 |
+
|
| 244 |
+
# Copied from modeling_mamba2.py
|
| 245 |
+
def update_conv_state(
|
| 246 |
+
self, layer_idx: int, new_conv_state: torch.Tensor, cache_init: bool = False
|
| 247 |
+
) -> torch.Tensor:
|
| 248 |
+
if cache_init:
|
| 249 |
+
self.conv_states[layer_idx] = new_conv_state.to(self.conv_states.device)
|
| 250 |
+
else:
|
| 251 |
+
self.conv_states[layer_idx] = self.conv_states[layer_idx].roll(shifts=-1, dims=-1)
|
| 252 |
+
self.conv_states[layer_idx][:, :, -1] = new_conv_state[:, 0, :].to(self.conv_states.device)
|
| 253 |
+
return self.conv_states[layer_idx]
|
| 254 |
+
|
| 255 |
+
def update_ssm_state(self, layer_idx: int, new_ssm_state: torch.Tensor):
|
| 256 |
+
self.ssm_states[layer_idx] = new_ssm_state.to(self.ssm_states.device)
|
| 257 |
+
return self.ssm_states[layer_idx]
|
| 258 |
+
|
| 259 |
+
def reset(self):
|
| 260 |
+
self.conv_states.zero_()
|
| 261 |
+
self.ssm_states.zero_()
|
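# Added usage sketch (assumed config fields as used above; not part of the original file):
#   cache = HybridMambaAttentionDynamicCache(config, batch_size=2, dtype=torch.bfloat16, device="cuda")
# then, per layer i:
#   - "M" (mamba) layers keep cache.conv_states[i] of shape (2, mamba_num_heads * mamba_head_dim, conv_kernel)
#     and cache.ssm_states[i] of shape (2, mamba_num_heads * mamba_head_dim, ssm_state_size),
#   - attention / MLP layers keep empty conv/ssm tensors, while cache.key_cache[i] and cache.value_cache[i]
#     grow along the sequence dimension through update().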
| 262 |
+
|
| 263 |
+
class MambaRMSNormGated(torch.nn.Module):
|
| 264 |
+
def __init__(self, hidden_size, group_size, eps=1e-5):
|
| 265 |
+
super().__init__()
|
| 266 |
+
self.weight = nn.Parameter(torch.ones(hidden_size))
|
| 267 |
+
self.variance_epsilon = eps
|
| 268 |
+
self.group_size = group_size
|
| 269 |
+
|
| 270 |
+
# jan28b version
|
| 271 |
+
def forward(self, hidden_states, gate=None):
|
| 272 |
+
return rmsnorm_fn(x=hidden_states,
|
| 273 |
+
weight=self.weight,
|
| 274 |
+
bias=None, # No bias
|
| 275 |
+
z=gate,
|
| 276 |
+
eps=self.variance_epsilon,
|
| 277 |
+
group_size=self.group_size,
|
| 278 |
+
norm_before_gate=False
|
| 279 |
+
)
|
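# Added note (assuming mamba_ssm's gated RMSNorm semantics): with norm_before_gate=False this fuses
#   y = RMSNorm(hidden_states * silu(gate)) * weight
# with the normalization applied per group of `group_size` channels.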
| 280 |
+
|
| 281 |
+
class NemotronHMamba2Mixer(nn.Module):
|
| 282 |
+
"""
|
| 283 |
+
Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
|
| 284 |
+
A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
|
| 285 |
+
∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
|
| 286 |
+
and is why Mamba is called **selective** state spaces)
|
| 287 |
+
"""
|
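# Added sketch of the discrete selective-SSM update this mixer implements (standard Mamba2/SSD form,
# not code from this file):
#   h_t = exp(dt_t * A) * h_{t-1} + dt_t * B_t * x_t
#   y_t = C_t . h_t + D * x_t
# where dt_t, B_t and C_t are derived from the input via in_proj (and the short causal conv) below,
# which is what makes the state space model selective.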
| 288 |
+
|
| 289 |
+
def __init__(self, config: NemotronHConfig, layer_idx: int):
|
| 290 |
+
super().__init__()
|
| 291 |
+
self.num_heads = config.mamba_num_heads
|
| 292 |
+
self.hidden_size = config.hidden_size
|
| 293 |
+
self.ssm_state_size = config.ssm_state_size
|
| 294 |
+
self.conv_kernel_size = config.conv_kernel
|
| 295 |
+
self.intermediate_size = config.mamba_num_heads * config.mamba_head_dim
|
| 296 |
+
self.layer_idx = layer_idx
|
| 297 |
+
self.use_conv_bias = config.use_conv_bias
|
| 298 |
+
self.activation = config.mamba_hidden_act
|
| 299 |
+
self.act = ACT2FN[config.mamba_hidden_act]
|
| 300 |
+
|
| 301 |
+
self.layer_norm_epsilon = config.layer_norm_epsilon
|
| 302 |
+
|
| 303 |
+
self.n_groups = config.n_groups
|
| 304 |
+
self.head_dim = config.mamba_head_dim
|
| 305 |
+
self.chunk_size = config.chunk_size
|
| 306 |
+
|
| 307 |
+
self.time_step_limit = config.time_step_limit
|
| 308 |
+
self.time_step_min = config.time_step_min
|
| 309 |
+
self.time_step_max = config.time_step_max
|
| 310 |
+
|
| 311 |
+
self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size
|
| 312 |
+
self.conv1d = nn.Conv1d(
|
| 313 |
+
in_channels=self.conv_dim,
|
| 314 |
+
out_channels=self.conv_dim,
|
| 315 |
+
bias=config.use_conv_bias,
|
| 316 |
+
kernel_size=config.conv_kernel,
|
| 317 |
+
groups=self.conv_dim,
|
| 318 |
+
padding=config.conv_kernel - 1,
|
| 319 |
+
)
|
| 320 |
+
|
| 321 |
+
# projection of the input hidden states
|
| 322 |
+
projection_size = self.intermediate_size + self.conv_dim + self.num_heads
|
| 323 |
+
self.in_proj = nn.Linear(
|
| 324 |
+
self.hidden_size,
|
| 325 |
+
projection_size,
|
| 326 |
+
bias=config.use_bias,
|
| 327 |
+
)
|
| 328 |
+
# selective projection used to make dt, B and C input dependent
|
| 329 |
+
|
| 330 |
+
# time step projection (discretization)
|
| 331 |
+
# instantiate once and copy inv_dt in init_weights of PretrainedModel
|
| 332 |
+
self.dt_bias = nn.Parameter(torch.ones(self.num_heads))
|
| 333 |
+
|
| 334 |
+
# S4D real initialization. These are not discretized!
|
| 335 |
+
# The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
|
| 336 |
+
A = torch.arange(1, self.num_heads + 1)
|
| 337 |
+
self.A_log = nn.Parameter(torch.log(A))
|
| 338 |
+
self.A_log._no_weight_decay = True
|
| 339 |
+
self.norm = MambaRMSNormGated(self.intermediate_size, eps=self.layer_norm_epsilon, group_size=self.intermediate_size // self.n_groups)
|
| 340 |
+
self.D = nn.Parameter(torch.ones(self.num_heads))
|
| 341 |
+
self.D._no_weight_decay = True
|
| 342 |
+
|
| 343 |
+
self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias)
|
| 344 |
+
self.use_bias = config.use_bias
|
| 345 |
+
|
| 346 |
+
if not is_fast_path_available:
|
| 347 |
+
logger.warning_once(
|
| 348 |
+
"The fast path is not available because on of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)`"
|
| 349 |
+
" is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and"
|
| 350 |
+
" https://github.com/Dao-AILab/causal-conv1d"
|
| 351 |
+
)
|
| 352 |
+
|
| 353 |
+
def cuda_kernels_forward(
|
| 354 |
+
self,
|
| 355 |
+
hidden_states: torch.Tensor,
|
| 356 |
+
cache_params: Optional[HybridMambaAttentionDynamicCache] = None,
|
| 357 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 358 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 359 |
+
):
|
| 360 |
+
# 1. Gated MLP's linear projection
|
| 361 |
+
hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask)
|
| 362 |
+
projected_states = self.in_proj(hidden_states)
|
| 363 |
+
|
| 364 |
+
# Set up dimensions for reshapes later
|
| 365 |
+
batch_size, seq_len, _ = hidden_states.shape
|
| 366 |
+
groups_time_state_size = self.n_groups * self.ssm_state_size
|
| 367 |
+
d_mlp = (
|
| 368 |
+
projected_states.shape[-1]
|
| 369 |
+
- 2 * self.intermediate_size
|
| 370 |
+
- 2 * self.n_groups * self.ssm_state_size
|
| 371 |
+
- self.num_heads
|
| 372 |
+
) // 2
|
| 373 |
+
|
| 374 |
+
# Single step calculations via cache
|
| 375 |
+
if cache_params is not None and cache_position is not None and cache_position[0] > 0:
|
| 376 |
+
_, _, gate, hidden_states_B_C, dt = projected_states.squeeze(1).split(
|
| 377 |
+
[d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
|
| 378 |
+
)
|
| 379 |
+
|
| 380 |
+
# 2. Convolution sequence transformation
|
| 381 |
+
hidden_states_B_C = causal_conv1d_update(
|
| 382 |
+
hidden_states_B_C,
|
| 383 |
+
cache_params.conv_states[self.layer_idx],
|
| 384 |
+
self.conv1d.weight.squeeze(1),
|
| 385 |
+
self.conv1d.bias,
|
| 386 |
+
self.activation,
|
| 387 |
+
)
|
| 388 |
+
|
| 389 |
+
hidden_states, B, C = torch.split(
|
| 390 |
+
hidden_states_B_C,
|
| 391 |
+
[self.intermediate_size, groups_time_state_size, groups_time_state_size],
|
| 392 |
+
dim=-1,
|
| 393 |
+
)
|
| 394 |
+
|
| 395 |
+
# 3. SSM transformation
|
| 396 |
+
A = -torch.exp(self.A_log.float()) # (nheads,)
|
| 397 |
+
A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
|
| 398 |
+
dt = dt[:, :, None].expand(-1, -1, self.head_dim)
|
| 399 |
+
dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim)
|
| 400 |
+
D = self.D[:, None, ...].expand(-1, self.head_dim)
|
| 401 |
+
B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups)
|
| 402 |
+
C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups)
|
| 403 |
+
hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim)
|
| 404 |
+
hidden_states = selective_state_update(
|
| 405 |
+
cache_params.ssm_states[self.layer_idx],
|
| 406 |
+
hidden_states_reshaped,
|
| 407 |
+
dt,
|
| 408 |
+
A,
|
| 409 |
+
B,
|
| 410 |
+
C,
|
| 411 |
+
D,
|
| 412 |
+
z=None,
|
| 413 |
+
dt_bias=dt_bias,
|
| 414 |
+
dt_softplus=True,
|
| 415 |
+
)
|
| 416 |
+
hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim)
|
| 417 |
+
hidden_states = self.norm(hidden_states, gate)
|
| 418 |
+
|
| 419 |
+
# 4. Final linear projection
|
| 420 |
+
out = self.out_proj(hidden_states)[:, None, ...]
|
| 421 |
+
|
| 422 |
+
# Fused calculations or step by step if no initialized cache is found
|
| 423 |
+
else:
|
| 424 |
+
A = -torch.exp(self.A_log.float()) # (num_heads) or (intermediate_size, state_size)
|
| 425 |
+
dt_limit_kwargs = {} if self.time_step_limit == (0.0, float("inf")) else {"dt_limit": self.time_step_limit}
|
| 426 |
+
|
| 427 |
+
# 2-4. Fused kernel for conv1d, SSM, and the final projection
|
| 428 |
+
if self.training and cache_params is None:
|
| 429 |
+
out = mamba_split_conv1d_scan_combined(
|
| 430 |
+
projected_states,
|
| 431 |
+
self.conv1d.weight.squeeze(1),
|
| 432 |
+
self.conv1d.bias,
|
| 433 |
+
self.dt_bias,
|
| 434 |
+
A,
|
| 435 |
+
D=self.D,
|
| 436 |
+
chunk_size=self.chunk_size,
|
| 437 |
+
seq_idx=None, # was seq_idx
|
| 438 |
+
activation=self.activation,
|
| 439 |
+
rmsnorm_weight=self.norm.weight,
|
| 440 |
+
rmsnorm_eps=self.norm.variance_epsilon,
|
| 441 |
+
outproj_weight=self.out_proj.weight,
|
| 442 |
+
outproj_bias=self.out_proj.bias,
|
| 443 |
+
headdim=self.head_dim,
|
| 444 |
+
ngroups=self.n_groups,
|
| 445 |
+
norm_before_gate=False,
|
| 446 |
+
return_final_states=False,
|
| 447 |
+
**dt_limit_kwargs,
|
| 448 |
+
)
|
| 449 |
+
|
| 450 |
+
else:
|
| 451 |
+
_, _, gate, hidden_states_B_C, dt = projected_states.split(
|
| 452 |
+
[d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
|
| 453 |
+
)
|
| 454 |
+
|
| 455 |
+
# 2. Convolution sequence transformation
|
| 456 |
+
# Init cache
|
| 457 |
+
if cache_params is not None:
|
| 458 |
+
hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2)
|
| 459 |
+
conv_states = nn.functional.pad(
|
| 460 |
+
hidden_states_B_C_transposed,
|
| 461 |
+
(cache_params.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0),
|
| 462 |
+
)
|
| 463 |
+
cache_params.update_conv_state(
|
| 464 |
+
layer_idx=self.layer_idx, new_conv_state=conv_states, cache_init=True
|
| 465 |
+
)
|
| 466 |
+
|
| 467 |
+
if self.activation not in ["silu", "swish"]:
|
| 468 |
+
hidden_states_B_C = self.act(
|
| 469 |
+
self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2)
|
| 470 |
+
)
|
| 471 |
+
else:
|
| 472 |
+
hidden_states_B_C = causal_conv1d_fn(
|
| 473 |
+
x=hidden_states_B_C.transpose(1, 2),
|
| 474 |
+
weight=self.conv1d.weight.squeeze(1),
|
| 475 |
+
bias=self.conv1d.bias,
|
| 476 |
+
activation=self.activation,
|
| 477 |
+
).transpose(1, 2)
|
| 478 |
+
hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
|
| 479 |
+
hidden_states, B, C = torch.split(
|
| 480 |
+
hidden_states_B_C,
|
| 481 |
+
[self.intermediate_size, groups_time_state_size, groups_time_state_size],
|
| 482 |
+
dim=-1,
|
| 483 |
+
)
|
| 484 |
+
|
| 485 |
+
# 3. SSM transformation
|
| 486 |
+
scan_output, ssm_state = mamba_chunk_scan_combined(
|
| 487 |
+
hidden_states.view(batch_size, seq_len, -1, self.head_dim),
|
| 488 |
+
dt,
|
| 489 |
+
A,
|
| 490 |
+
B.view(batch_size, seq_len, self.n_groups, -1),
|
| 491 |
+
C.view(batch_size, seq_len, self.n_groups, -1),
|
| 492 |
+
chunk_size=self.chunk_size,
|
| 493 |
+
D=self.D,
|
| 494 |
+
z=None,
|
| 495 |
+
seq_idx=None,
|
| 496 |
+
return_final_states=True,
|
| 497 |
+
dt_bias=self.dt_bias,
|
| 498 |
+
dt_softplus=True,
|
| 499 |
+
**dt_limit_kwargs,
|
| 500 |
+
)
|
| 501 |
+
|
| 502 |
+
# Init cache
|
| 503 |
+
if ssm_state is not None and cache_params is not None:
|
| 504 |
+
cache_params.update_ssm_state(layer_idx=self.layer_idx, new_ssm_state=ssm_state)
|
| 505 |
+
|
| 506 |
+
scan_output = scan_output.view(batch_size, seq_len, -1)
|
| 507 |
+
|
| 508 |
+
# Multiply "gate" branch and apply extra normalization layer
|
| 509 |
+
scan_output = self.norm(scan_output, gate)
|
| 510 |
+
|
| 511 |
+
# 4. Final linear projection
|
| 512 |
+
out = self.out_proj(scan_output)
|
| 513 |
+
return out
|
| 514 |
+
|
| 515 |
+
# fmt: off
|
| 516 |
+
def torch_forward(self, input_states, cache_params: Optional[HybridMambaAttentionDynamicCache]=None, cache_position:Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None):
|
| 517 |
+
batch_size, seq_len, _ = input_states.shape
|
| 518 |
+
dtype = input_states.dtype
|
| 519 |
+
|
| 520 |
+
# 1. Gated MLP's linear projection
|
| 521 |
+
input_states = apply_mask_to_padding_states(input_states, attention_mask)
|
| 522 |
+
projected_states = self.in_proj(input_states)
|
| 523 |
+
d_mlp = (projected_states.shape[-1] - 2 * self.intermediate_size - 2 * self.n_groups * self.ssm_state_size-self.num_heads) // 2
|
| 524 |
+
_, _, gate, hidden_states_B_C, dt = projected_states.split(
|
| 525 |
+
[d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
|
| 526 |
+
)
|
| 527 |
+
|
| 528 |
+
# 2. Convolution sequence transformation
|
| 529 |
+
if cache_params is not None and cache_position is not None and cache_position[0] > 0:
|
| 530 |
+
cache_params.update_conv_state(layer_idx=self.layer_idx, new_conv_state=hidden_states_B_C, cache_init=False)
|
| 531 |
+
|
| 532 |
+
# We need to guarantee that anything regarding the cache is on the same device
|
| 533 |
+
conv_states = cache_params.conv_states[self.layer_idx].to(device=self.conv1d.weight.device)
|
| 534 |
+
|
| 535 |
+
hidden_states_B_C = torch.sum(
|
| 536 |
+
conv_states * self.conv1d.weight.squeeze(1), dim=-1
|
| 537 |
+
)
|
| 538 |
+
if self.use_conv_bias:
|
| 539 |
+
hidden_states_B_C = hidden_states_B_C + self.conv1d.bias
|
| 540 |
+
hidden_states_B_C = self.act(hidden_states_B_C)
|
| 541 |
+
else:
|
| 542 |
+
# Init cache
|
| 543 |
+
if cache_params is not None:
|
| 544 |
+
hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2)
|
| 545 |
+
conv_states = nn.functional.pad(
|
| 546 |
+
hidden_states_B_C_transposed, (cache_params.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0)
|
| 547 |
+
)
|
| 548 |
+
cache_params.update_conv_state(layer_idx=self.layer_idx, new_conv_state=conv_states, cache_init=True)
|
| 549 |
+
|
| 550 |
+
hidden_states_B_C = self.act(self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2))
|
| 551 |
+
|
| 552 |
+
hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
|
| 553 |
+
hidden_states, B, C = torch.split(
|
| 554 |
+
hidden_states_B_C,
|
| 555 |
+
[self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size],
|
| 556 |
+
dim=-1
|
| 557 |
+
)
|
| 558 |
+
|
| 559 |
+
# 3. SSM transformation
|
| 560 |
+
A = -torch.exp(self.A_log.float()) # [num_heads]
|
| 561 |
+
if cache_params is not None and cache_position is not None and cache_position[0] > 0:
|
| 562 |
+
# We need to guarantee that anything regarding the cache is on the same device
|
| 563 |
+
cache_device = cache_params.ssm_states.device
|
| 564 |
+
|
| 565 |
+
# Note: there is no need to pad parameter matrices here, as there is just one new token
|
| 566 |
+
# for batched generation
|
| 567 |
+
dt = dt[:, 0, :][:, None, ...]
|
| 568 |
+
dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim)
|
| 569 |
+
# [num_heads] -> [num_heads, head_dim]
|
| 570 |
+
dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim)
|
| 571 |
+
|
| 572 |
+
dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype))
|
| 573 |
+
dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
|
| 574 |
+
A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
|
| 575 |
+
# [bsz, num_heads, head_dim, state_size]
|
| 576 |
+
dA = (torch.exp(dt[..., None] * A)).to(device=cache_device)
|
| 577 |
+
|
| 578 |
+
# Discretize B
|
| 579 |
+
# [bsz, n_groups * state_size] -> [bsz, n_groups, 1, state_size] ->
|
| 580 |
+
# -> [bsz, n_groups, group to head repetition factor, state_size] -> [bsz, num_heads, state_size]
|
| 581 |
+
B = B.reshape(batch_size, self.n_groups, -1)[..., None, :]
|
| 582 |
+
B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous()
|
| 583 |
+
B = B.reshape(batch_size, -1, B.shape[-1])
|
| 584 |
+
# [bsz, num_heads, head_dim, state_size]
|
| 585 |
+
dB = dt[..., None] * B[..., None, :]
|
| 586 |
+
|
| 587 |
+
# Discretize x into dB
|
| 588 |
+
# [bsz, intermediate_size] -> [bsz, num_heads, head_dim]
|
| 589 |
+
hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim)
|
| 590 |
+
dBx = (dB * hidden_states[..., None]).to(device=cache_device)
|
| 591 |
+
|
| 592 |
+
# State calculation
|
| 593 |
+
cache_params.update_ssm_state(
|
| 594 |
+
layer_idx=self.layer_idx,
|
| 595 |
+
new_ssm_state=cache_params.ssm_states[self.layer_idx] * dA + dBx
|
| 596 |
+
)
|
| 597 |
+
|
| 598 |
+
# Subsequent output
|
| 599 |
+
# [bsz, n_groups * state_size] -> [bsz, num_heads, state_size]
|
| 600 |
+
C = C.reshape(batch_size, self.n_groups, -1)[..., None, :]
|
| 601 |
+
C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous()
|
| 602 |
+
C = C.reshape(batch_size, -1, C.shape[-1])
|
| 603 |
+
# [bsz, num_heads, head_dim]
|
| 604 |
+
|
| 605 |
+
ssm_states = cache_params.ssm_states[self.layer_idx].to(device=C.device, dtype=C.dtype) # Shape: [b, h, d, n]
|
| 606 |
+
# Reshape ssm_states to merge the first two dimensions
|
| 607 |
+
ssm_states_reshaped = ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size) # Shape: [b*h, d, n]
|
| 608 |
+
C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1) # Shape: [b*h, n, 1]
|
| 609 |
+
y = torch.bmm(ssm_states_reshaped, C_reshaped)
|
| 610 |
+
y = y.view(batch_size, self.num_heads, self.head_dim)
|
| 611 |
+
|
| 612 |
+
# D skip connection
|
| 613 |
+
# [num_heads] -> [num_heads, head_dim]
|
| 614 |
+
D = self.D[..., None].expand(self.D.shape[0], self.head_dim)
|
| 615 |
+
y = (y + hidden_states * D).to(y.dtype)
|
| 616 |
+
|
| 617 |
+
# [bsz, num_heads, head_dim] -> [bsz, 1, intermediate_size]
|
| 618 |
+
y = y.reshape(batch_size, -1)[:, None, ...]
|
| 619 |
+
else:
|
| 620 |
+
# begin ssd naive implementation without einsums
|
| 621 |
+
dt = nn.functional.softplus(dt + self.dt_bias)
|
| 622 |
+
dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
|
| 623 |
+
hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float()
|
| 624 |
+
B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
|
| 625 |
+
C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
|
| 626 |
+
B = B.repeat(1, 1, self.num_heads // self.n_groups, 1)
|
| 627 |
+
C = C.repeat(1, 1, self.num_heads // self.n_groups, 1)
|
| 628 |
+
pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size
|
| 629 |
+
|
| 630 |
+
D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size)
|
| 631 |
+
|
| 632 |
+
# Discretize x and A
|
| 633 |
+
hidden_states = hidden_states * dt[..., None]
|
| 634 |
+
A = A.to(hidden_states.dtype) * dt
|
| 635 |
+
|
| 636 |
+
# Rearrange into blocks/chunks
|
| 637 |
+
hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)]
|
| 638 |
+
|
| 639 |
+
# [bsz, -1, chunk_size, num_heads] -> [bsz, num_heads, -1, chunk_size]
|
| 640 |
+
A = A.permute(0, 3, 1, 2)
|
| 641 |
+
A_cumsum = torch.cumsum(A, dim=-1)
|
| 642 |
+
|
| 643 |
+
# 1. Compute the output for each intra-chunk (diagonal blocks)
|
| 644 |
+
# This is the analog of a causal mask
|
| 645 |
+
L = torch.exp(segment_sum(A))
|
| 646 |
+
|
| 647 |
+
# Contraction of C and B to get G (attention-weights like)
|
| 648 |
+
G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, :, :] # shape: (b, c, l, s, h, n)
|
| 649 |
+
G = G_intermediate.sum(dim=-1) # shape: (b, c, l, s, h)
|
| 650 |
+
|
| 651 |
+
# Compute M, equivalent to applying attention mask to weights
|
| 652 |
+
M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None]
|
| 653 |
+
M = M_intermediate.sum(dim=-1)
|
| 654 |
+
|
| 655 |
+
# Compute Y_diag (apply to values)
|
| 656 |
+
Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(dim=3)
|
| 657 |
+
|
| 658 |
+
# 2. Compute the state for each intra-chunk
|
| 659 |
+
# (right term of low-rank factorization of off-diagonal blocks; B terms)
|
| 660 |
+
decay_states = torch.exp((A_cumsum[:, :, :, -1:] - A_cumsum))
|
| 661 |
+
B_decay = B * decay_states.permute(0, -2, -1, 1)[..., None]
|
| 662 |
+
states = (B_decay[..., None, :] * hidden_states[..., None]).sum(dim=2)
|
| 663 |
+
|
| 664 |
+
# 3. Compute the inter-chunk SSM recurrence; produces correct SSM states at chunk boundaries
|
| 665 |
+
# (middle term of factorization of off-diag blocks; A terms)
|
| 666 |
+
if cache_params is not None and cache_position is not None and cache_position[0] > 0:
|
| 667 |
+
previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...].to(device=states.device)
|
| 668 |
+
else:
|
| 669 |
+
previous_states = torch.zeros_like(states[:, :1])
|
| 670 |
+
states = torch.cat([previous_states, states], dim=1)
|
| 671 |
+
decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0))))
|
| 672 |
+
decay_chunk = decay_chunk.transpose(1, 3)
|
| 673 |
+
new_states = (decay_chunk[..., None, None] * states[:, :, None, ...]).sum(dim=1)
|
| 674 |
+
states, ssm_state = new_states[:, :-1], new_states[:, -1]
|
| 675 |
+
|
| 676 |
+
# 4. Compute state -> output conversion per chunk
|
| 677 |
+
# (left term of low-rank factorization of off-diagonal blocks; C terms)
|
| 678 |
+
state_decay_out = torch.exp(A_cumsum)
|
| 679 |
+
C_times_states = (C[..., None, :] * states[:, :, None, ...])
|
| 680 |
+
state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1)
|
| 681 |
+
Y_off = (C_times_states.sum(-1) * state_decay_out_permuted[..., None])
|
| 682 |
+
|
| 683 |
+
# Add output of intra-chunk and inter-chunk terms (diagonal and off-diagonal blocks)
|
| 684 |
+
y = Y_diag + Y_off
|
| 685 |
+
# [bsz, -1, self.chunk_size, num_heads, head_dim] -> [bsz, (padded) seq_len, num_heads, head_dim]
|
| 686 |
+
y = y.reshape(batch_size, -1, self.num_heads, self.head_dim)
|
| 687 |
+
|
| 688 |
+
y = y + D_residual
|
| 689 |
+
# Cutting off padded chunks
|
| 690 |
+
if pad_size > 0:
|
| 691 |
+
y = y[:, :seq_len, :, :]
|
| 692 |
+
y = y.reshape(batch_size, seq_len, -1)
|
| 693 |
+
|
| 694 |
+
# Init cache
|
| 695 |
+
if ssm_state is not None and cache_params is not None:
|
| 696 |
+
cache_params.update_ssm_state(layer_idx=self.layer_idx, new_ssm_state=ssm_state)
|
| 697 |
+
|
| 698 |
+
scan_output = self.norm(y, gate)
|
| 699 |
+
|
| 700 |
+
# end ssd naive
|
| 701 |
+
|
| 702 |
+
# 4. Final linear projection
|
| 703 |
+
contextualized_states = self.out_proj(scan_output.to(dtype)) # [batch, seq_len, hidden_size]
|
| 704 |
+
return contextualized_states
|
| 705 |
+
# fmt: on
|
| 706 |
+
|
| 707 |
+
def forward(
|
| 708 |
+
self,
|
| 709 |
+
hidden_states,
|
| 710 |
+
cache_params: Optional[HybridMambaAttentionDynamicCache] = None,
|
| 711 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 712 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 713 |
+
):
|
| 714 |
+
if is_fast_path_available and "cuda" in self.in_proj.weight.device.type:
|
| 715 |
+
return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask)
|
| 716 |
+
dtype = hidden_states.dtype
|
| 717 |
+
if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
|
| 718 |
+
# tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
|
| 719 |
+
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
|
| 720 |
+
|
| 721 |
+
return self.torch_forward(hidden_states, cache_params, cache_position, attention_mask)
|
| 722 |
+
|
| 723 |
+
|
| 724 |
+
class NemotronHRMSNorm(nn.Module):
|
| 725 |
+
def __init__(self, hidden_size, eps=1e-6):
|
| 726 |
+
"""
|
| 727 |
+
NemotronHRMSNorm is equivalent to T5LayerNorm and LlamaRMSNorm
|
| 728 |
+
"""
|
| 729 |
+
super().__init__()
|
| 730 |
+
self.weight = nn.Parameter(torch.ones(hidden_size))
|
| 731 |
+
self.variance_epsilon = eps
|
| 732 |
+
|
| 733 |
+
def forward(self, hidden_states):
|
| 734 |
+
input_dtype = hidden_states.dtype
|
| 735 |
+
hidden_states = hidden_states.to(torch.float32)
|
| 736 |
+
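# RMS normalization: x * rsqrt(mean(x^2) + eps), computed in float32 for numerical stability.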
variance = hidden_states.pow(2).mean(-1, keepdim=True)
|
| 737 |
+
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
|
| 738 |
+
# Weights are in float32
|
| 739 |
+
return (self.weight.to(torch.float32) * hidden_states).to(input_dtype)
|
| 740 |
+
|
| 741 |
+
class NemotronHBlock(nn.Module):
|
| 742 |
+
def __init__(self, config, layer_idx):
|
| 743 |
+
super().__init__()
|
| 744 |
+
self.config = config
|
| 745 |
+
self.layer_idx = layer_idx
|
| 746 |
+
self.residual_in_fp32 = config.residual_in_fp32
|
| 747 |
+
self.norm = NemotronHRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
|
| 748 |
+
|
| 749 |
+
# M: Mamba2, *: Attention, -: MLP
|
| 750 |
+
self.block_type = config.layers_block_type[layer_idx]
|
| 751 |
+
if self.block_type == "mamba":
|
| 752 |
+
self.mixer = NemotronHMamba2Mixer(config, layer_idx=layer_idx)
|
| 753 |
+
elif self.block_type == "attention":
|
| 754 |
+
self.mixer = NEMOTRONH_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx)
|
| 755 |
+
elif self.block_type == "mlp":
|
| 756 |
+
self.mixer = NemotronHMLP(config, layer_idx=layer_idx)
|
| 757 |
+
else:
|
| 758 |
+
raise ValueError(f"Invalid layer pattern {config.hybrid_override_pattern[layer_idx]}")
|
| 759 |
+
|
| 760 |
+
def forward(
|
| 761 |
+
self,
|
| 762 |
+
hidden_states,
|
| 763 |
+
cache_params: Optional[HybridMambaAttentionDynamicCache] = None,
|
| 764 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 765 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 766 |
+
):
|
| 767 |
+
with torch.cuda.stream(torch.cuda.default_stream(hidden_states.device)):
|
| 768 |
+
# * Use torch.cuda.stream() to avoid NaN issues when using multiple GPUs
|
| 769 |
+
residual = hidden_states
|
| 770 |
+
hidden_states = self.norm(hidden_states.to(dtype=self.norm.weight.dtype))
|
| 771 |
+
if self.residual_in_fp32:
|
| 772 |
+
residual = residual.to(torch.float32)
|
| 773 |
+
|
| 774 |
+
if self.block_type == "mamba":
|
| 775 |
+
hidden_states = self.mixer(
|
| 776 |
+
hidden_states, cache_params=cache_params, cache_position=cache_position
|
| 777 |
+
)
|
| 778 |
+
elif self.block_type == "attention":
|
| 779 |
+
hidden_states = self.mixer(
|
| 780 |
+
hidden_states, cache_position=cache_position
|
| 781 |
+
)
|
| 782 |
+
hidden_states = hidden_states[0]
|
| 783 |
+
elif self.block_type == "mlp":
|
| 784 |
+
hidden_states = self.mixer(
|
| 785 |
+
hidden_states
|
| 786 |
+
)
|
| 787 |
+
else:
|
| 788 |
+
raise ValueError(f"Invalid block_type: {self.block_type}")
|
| 789 |
+
|
| 790 |
+
hidden_states = residual + hidden_states
|
| 791 |
+
return hidden_states
|
| 792 |
+
|
| 793 |
+
|
| 794 |
+
# Copied from transformers.models.nemotron.modeling_nemotron Nemotron->NemotronH
|
| 795 |
+
class NemotronHMLP(nn.Module):
|
| 796 |
+
def __init__(self, config, layer_idx: Optional[int] = None):
|
| 797 |
+
super().__init__()
|
| 798 |
+
self.config = config
|
| 799 |
+
self.layer_idx = layer_idx
|
| 800 |
+
if layer_idx is None:
|
| 801 |
+
logger.warning_once(
|
| 802 |
+
f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
|
| 803 |
+
"lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
|
| 804 |
+
"when creating this class."
|
| 805 |
+
)
|
| 806 |
+
self.hidden_size = config.hidden_size
|
| 807 |
+
#intermediate_size = config.expand * config.hidden_size
|
| 808 |
+
self.intermediate_size = config.intermediate_size
|
| 809 |
+
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
|
| 810 |
+
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
|
| 811 |
+
self.act_fn = ACT2FN[config.mlp_hidden_act]
|
| 812 |
+
|
| 813 |
+
def forward(self, x):
|
| 814 |
+
return self.down_proj(self.act_fn(self.up_proj(x)))
|
| 815 |
+
|
| 816 |
+
|
| 817 |
+
# Copied from transformers.models.llama.modeling_llama.repeat_kv
|
| 818 |
+
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
|
| 819 |
+
"""
|
| 820 |
+
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
|
| 821 |
+
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
|
| 822 |
+
"""
|
| 823 |
+
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
|
| 824 |
+
if n_rep == 1:
|
| 825 |
+
return hidden_states
|
| 826 |
+
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
|
| 827 |
+
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
|
| 828 |
+
|
| 829 |
+
|
| 830 |
+
class NemotronHAttention(nn.Module):
|
| 831 |
+
"""Multi-headed attention from 'Attention Is All You Need' paper"""
|
| 832 |
+
|
| 833 |
+
def __init__(self, config: NemotronHConfig, layer_idx: Optional[int] = None):
|
| 834 |
+
super().__init__()
|
| 835 |
+
self.config = config
|
| 836 |
+
self.layer_idx = layer_idx
|
| 837 |
+
if layer_idx is None:
|
| 838 |
+
logger.warning_once(
|
| 839 |
+
f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
|
| 840 |
+
"lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
|
| 841 |
+
"when creating this class."
|
| 842 |
+
)
|
| 843 |
+
|
| 844 |
+
self.attention_dropout = config.attention_dropout
|
| 845 |
+
self.hidden_size = config.hidden_size
|
| 846 |
+
self.num_heads = config.num_attention_heads
|
| 847 |
+
if config.head_dim is not None:
|
| 848 |
+
self.head_dim = config.head_dim
|
| 849 |
+
else:
|
| 850 |
+
self.head_dim = config.hidden_size // config.num_attention_heads
|
| 851 |
+
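# Grouped-query attention: num_heads query heads share num_key_value_heads K/V heads (num_key_value_groups query heads per K/V head).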
self.num_key_value_heads = config.num_key_value_heads
|
| 852 |
+
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
|
| 853 |
+
self.max_position_embeddings = config.max_position_embeddings
|
| 854 |
+
self.is_causal = True
|
| 855 |
+
|
| 856 |
+
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
|
| 857 |
+
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
|
| 858 |
+
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
|
| 859 |
+
self.o_proj = nn.Linear(self.head_dim * self.num_heads, self.hidden_size, bias=config.attention_bias)
|
| 860 |
+
|
| 861 |
+
def forward(
|
| 862 |
+
self,
|
| 863 |
+
hidden_states: torch.Tensor,
|
| 864 |
+
# position_embeddings: Tuple[torch.Tensor, torch.Tensor], #TODO
|
| 865 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 866 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 867 |
+
past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
|
| 868 |
+
output_attentions: bool = False,
|
| 869 |
+
use_cache: bool = False,
|
| 870 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 871 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
| 872 |
+
bsz, q_len, _ = hidden_states.size()
|
| 873 |
+
|
| 874 |
+
query_states = self.q_proj(hidden_states)
|
| 875 |
+
key_states = self.k_proj(hidden_states)
|
| 876 |
+
value_states = self.v_proj(hidden_states)
|
| 877 |
+
|
| 878 |
+
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
| 879 |
+
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
| 880 |
+
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
| 881 |
+
|
| 882 |
+
if past_key_value is not None:
|
| 883 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx)
|
| 884 |
+
|
| 885 |
+
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
| 886 |
+
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
| 887 |
+
|
| 888 |
+
causal_mask = attention_mask
|
| 889 |
+
if attention_mask is not None: # no matter the length, we just slice it
|
| 890 |
+
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
|
| 891 |
+
|
| 892 |
+
if query_states.device.type == "cuda" and attention_mask is not None:
|
| 893 |
+
query_states = query_states.contiguous()
|
| 894 |
+
key_states = key_states.contiguous()
|
| 895 |
+
value_states = value_states.contiguous()
|
| 896 |
+
|
| 897 |
+
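# Only rely on SDPA's built-in causal masking when no explicit mask is supplied and more than one query token is processed (single-token decoding needs no mask).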
is_causal = True if causal_mask is None and q_len > 1 else False
|
| 898 |
+
|
| 899 |
+
attn_output = torch.nn.functional.scaled_dot_product_attention(
|
| 900 |
+
query_states,
|
| 901 |
+
key_states,
|
| 902 |
+
value_states,
|
| 903 |
+
attn_mask=causal_mask,
|
| 904 |
+
dropout_p=self.attention_dropout if self.training else 0.0,
|
| 905 |
+
is_causal=is_causal,
|
| 906 |
+
)
|
| 907 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
| 908 |
+
#attn_output = attn_output.view(bsz, q_len, self.hidden_size)
|
| 909 |
+
attn_output = attn_output.view(bsz, q_len, self.num_heads * self.head_dim)
|
| 910 |
+
|
| 911 |
+
attn_output = self.o_proj(attn_output)
|
| 912 |
+
|
| 913 |
+
return attn_output, None, past_key_value
|
| 914 |
+
|
| 915 |
+
|
| 916 |
+
# Adapted from transformers.models.mistral.modeling_mistral.MistralFlashAttention2 with Mistral->Jamba
|
| 917 |
+
#class JambaFlashAttention2(JambaAttention):
|
| 918 |
+
class NemotronHFlashAttention2(NemotronHAttention):
|
| 919 |
+
"""
|
| 920 |
+
NemotronH flash attention module. This module inherits from `NemotronHAttention` as the weights of the module stay
|
| 921 |
+
untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
|
| 922 |
+
flash attention and deal with padding tokens in case the input contains any of them.
|
| 923 |
+
"""
|
| 924 |
+
def __init__(self, *args, **kwargs):
|
| 925 |
+
super().__init__(*args, **kwargs)
|
| 926 |
+
|
| 927 |
+
# TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
|
| 928 |
+
# flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
|
| 929 |
+
# Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
|
| 930 |
+
self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
|
| 931 |
+
|
| 932 |
+
def forward(
|
| 933 |
+
self,
|
| 934 |
+
hidden_states: torch.Tensor,
|
| 935 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 936 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 937 |
+
past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
|
| 938 |
+
output_attentions: bool = False,
|
| 939 |
+
use_cache: bool = False,
|
| 940 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 941 |
+
**kwargs,
|
| 942 |
+
):
|
| 943 |
+
bsz, q_len, _ = hidden_states.size()
|
| 944 |
+
|
| 945 |
+
query_states = self.q_proj(hidden_states)
|
| 946 |
+
key_states = self.k_proj(hidden_states)
|
| 947 |
+
value_states = self.v_proj(hidden_states)
|
| 948 |
+
|
| 949 |
+
# Flash attention requires the input to have the shape
|
| 950 |
+
# batch_size x seq_length x num_heads x head_dim
|
| 951 |
+
# therefore we just need to keep the original shape
|
| 952 |
+
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim)
|
| 953 |
+
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
| 954 |
+
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
| 955 |
+
|
| 956 |
+
if past_key_value is not None:
|
| 957 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx)
|
| 958 |
+
|
| 959 |
+
# repeat k/v heads if n_kv_heads < n_heads
|
| 960 |
+
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
| 961 |
+
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
| 962 |
+
dropout_rate = 0.0 if not self.training else self.attention_dropout
|
| 963 |
+
|
| 964 |
+
# In PEFT, usually we cast the layer norms in float32 for training stability reasons
|
| 965 |
+
# therefore the input hidden states get silently cast to float32. Hence, we need to
|
| 966 |
+
# cast them back to float16 just to be sure everything works as expected.
|
| 967 |
+
input_dtype = query_states.dtype
|
| 968 |
+
if input_dtype == torch.float32:
|
| 969 |
+
if torch.is_autocast_enabled():
|
| 970 |
+
target_dtype = torch.get_autocast_gpu_dtype()
|
| 971 |
+
# Handle the case where the model is quantized
|
| 972 |
+
elif hasattr(self.config, "_pre_quantization_dtype"):
|
| 973 |
+
target_dtype = self.config._pre_quantization_dtype
|
| 974 |
+
else:
|
| 975 |
+
target_dtype = self.q_proj.weight.dtype
|
| 976 |
+
|
| 977 |
+
logger.warning_once(
|
| 978 |
+
f"The input hidden states seems to be silently casted in float32, this might be related to"
|
| 979 |
+
f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
|
| 980 |
+
f" {target_dtype}."
|
| 981 |
+
)
|
| 982 |
+
|
| 983 |
+
query_states = query_states.to(target_dtype)
|
| 984 |
+
key_states = key_states.to(target_dtype)
|
| 985 |
+
value_states = value_states.to(target_dtype)
|
| 986 |
+
|
| 987 |
+
# Reshape to the expected shape for Flash Attention
|
| 988 |
+
key_states = key_states.transpose(1, 2)
|
| 989 |
+
value_states = value_states.transpose(1, 2)
|
| 990 |
+
|
| 991 |
+
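# _flash_attention_forward expects (batch, seq_len, num_heads, head_dim) tensors and handles padding internally via the 2D attention mask.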
attn_output = _flash_attention_forward(
|
| 992 |
+
query_states,
|
| 993 |
+
key_states,
|
| 994 |
+
value_states,
|
| 995 |
+
attention_mask,
|
| 996 |
+
q_len,
|
| 997 |
+
dropout=dropout_rate,
|
| 998 |
+
sliding_window=getattr(self.config, "sliding_window", None),
|
| 999 |
+
is_causal=self.is_causal,
|
| 1000 |
+
use_top_left_mask=self._flash_attn_uses_top_left_mask,
|
| 1001 |
+
)
|
| 1002 |
+
|
| 1003 |
+
#attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
|
| 1004 |
+
attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim).contiguous()
|
| 1005 |
+
attn_output = self.o_proj(attn_output)
|
| 1006 |
+
|
| 1007 |
+
if not output_attentions:
|
| 1008 |
+
attn_weights = None
|
| 1009 |
+
|
| 1010 |
+
return attn_output, attn_weights, past_key_value
|
| 1011 |
+
|
| 1012 |
+
|
| 1013 |
+
# Adapted from transformers.models.mistral.modeling_mistral.MistralSdpaAttention with Mistral->Jamba
|
| 1014 |
+
#class JambaSdpaAttention(JambaAttention):
|
| 1015 |
+
class NemotronHSdpaAttention(NemotronHAttention):
|
| 1016 |
+
"""
|
| 1017 |
+
NemotronH attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
|
| 1018 |
+
`NemotronHAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
|
| 1019 |
+
SDPA API.
|
| 1020 |
+
"""
|
| 1021 |
+
|
| 1022 |
+
# Adapted from NemotronHAttention.forward
|
| 1023 |
+
def forward(
|
| 1024 |
+
self,
|
| 1025 |
+
hidden_states: torch.Tensor,
|
| 1026 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1027 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1028 |
+
past_key_value: Optional[HybridMambaAttentionDynamicCache] = None,
|
| 1029 |
+
output_attentions: bool = False,
|
| 1030 |
+
use_cache: bool = False,
|
| 1031 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 1032 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
| 1033 |
+
if output_attentions:
|
| 1034 |
+
# TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
|
| 1035 |
+
logger.warning_once(
|
| 1036 |
+
"NemotronHModel is using NemotronHSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
|
| 1037 |
+
'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
|
| 1038 |
+
)
|
| 1039 |
+
return super().forward(
|
| 1040 |
+
hidden_states=hidden_states,
|
| 1041 |
+
attention_mask=attention_mask,
|
| 1042 |
+
position_ids=position_ids,
|
| 1043 |
+
past_key_value=past_key_value,
|
| 1044 |
+
output_attentions=output_attentions,
|
| 1045 |
+
use_cache=use_cache,
|
| 1046 |
+
)
|
| 1047 |
+
|
| 1048 |
+
bsz, q_len, _ = hidden_states.size()
|
| 1049 |
+
|
| 1050 |
+
query_states = self.q_proj(hidden_states)
|
| 1051 |
+
key_states = self.k_proj(hidden_states)
|
| 1052 |
+
value_states = self.v_proj(hidden_states)
|
| 1053 |
+
|
| 1054 |
+
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
| 1055 |
+
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
| 1056 |
+
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
| 1057 |
+
|
| 1058 |
+
if past_key_value is not None:
|
| 1059 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx)
|
| 1060 |
+
|
| 1061 |
+
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
| 1062 |
+
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
| 1063 |
+
|
| 1064 |
+
causal_mask = attention_mask
|
| 1065 |
+
if attention_mask is not None:
|
| 1066 |
+
causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
|
| 1067 |
+
|
| 1068 |
+
# SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
|
| 1069 |
+
# Reference: https://github.com/pytorch/pytorch/issues/112577.
|
| 1070 |
+
if query_states.device.type == "cuda" and attention_mask is not None:
|
| 1071 |
+
query_states = query_states.contiguous()
|
| 1072 |
+
key_states = key_states.contiguous()
|
| 1073 |
+
value_states = value_states.contiguous()
|
| 1074 |
+
|
| 1075 |
+
# We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
|
| 1076 |
+
# in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
|
| 1077 |
+
# The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
|
| 1078 |
+
is_causal = True if self.is_causal and causal_mask is None and q_len > 1 else False
|
| 1079 |
+
|
| 1080 |
+
attn_output = torch.nn.functional.scaled_dot_product_attention(
|
| 1081 |
+
query_states,
|
| 1082 |
+
key_states,
|
| 1083 |
+
value_states,
|
| 1084 |
+
attn_mask=causal_mask,
|
| 1085 |
+
dropout_p=self.attention_dropout if self.training else 0.0,
|
| 1086 |
+
is_causal=is_causal,
|
| 1087 |
+
)
|
| 1088 |
+
|
| 1089 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
| 1090 |
+
attn_output = attn_output.view(bsz, q_len, self.num_heads * self.head_dim)
|
| 1091 |
+
|
| 1092 |
+
attn_output = self.o_proj(attn_output)
|
| 1093 |
+
|
| 1094 |
+
return attn_output, None, past_key_value
|
| 1095 |
+
|
| 1096 |
+
|
| 1097 |
+
NEMOTRONH_ATTENTION_CLASSES = {
|
| 1098 |
+
"eager": NemotronHAttention,
|
| 1099 |
+
"flash_attention_2": NemotronHFlashAttention2,
|
| 1100 |
+
"sdpa": NemotronHSdpaAttention,
|
| 1101 |
+
}
|
| 1102 |
+
|
| 1103 |
+
# Copied from transformers.models.mamba.modeling_mamba2.Mamba2PreTrainedModel
|
| 1104 |
+
class NemotronHPreTrainedModel(PreTrainedModel):
|
| 1105 |
+
"""
|
| 1106 |
+
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
|
| 1107 |
+
models.
|
| 1108 |
+
"""
|
| 1109 |
+
|
| 1110 |
+
config_class = NemotronHConfig
|
| 1111 |
+
base_model_prefix = "backbone"
|
| 1112 |
+
_no_split_modules = ["NemotronHBlock"]
|
| 1113 |
+
supports_gradient_checkpointing = True
|
| 1114 |
+
_is_stateful = True
|
| 1115 |
+
|
| 1116 |
+
def _init_weights(self, module):
|
| 1117 |
+
"""Initialize the weights."""
|
| 1118 |
+
if isinstance(module, NemotronHMamba2Mixer):
|
| 1119 |
+
module.A_log._no_weight_decay = True
|
| 1120 |
+
module.D._no_weight_decay = True
|
| 1121 |
+
|
| 1122 |
+
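# Draw dt log-uniformly from [time_step_min, time_step_max] and clamp at time_step_floor; dt_bias is then set to the inverse softplus of dt below.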
dt = torch.exp(
|
| 1123 |
+
torch.rand(self.config.mamba_num_heads)
|
| 1124 |
+
* (math.log(self.config.time_step_max) - math.log(self.config.time_step_min))
|
| 1125 |
+
+ math.log(self.config.time_step_min)
|
| 1126 |
+
).clamp(min=self.config.time_step_floor)
|
| 1127 |
+
|
| 1128 |
+
# # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759
|
| 1129 |
+
inv_dt = dt + torch.log(-torch.expm1(-dt))
|
| 1130 |
+
with torch.no_grad():
|
| 1131 |
+
module.dt_bias.copy_(inv_dt)
|
| 1132 |
+
module.dt_bias._no_reinit = True
|
| 1133 |
+
|
| 1134 |
+
if isinstance(module, nn.Linear):
|
| 1135 |
+
if module.bias is not None:
|
| 1136 |
+
if not getattr(module.bias, "_no_reinit", False):
|
| 1137 |
+
nn.init.zeros_(module.bias)
|
| 1138 |
+
elif isinstance(module, nn.Embedding):
|
| 1139 |
+
nn.init.normal_(module.weight, std=self.config.initializer_range)
|
| 1140 |
+
|
| 1141 |
+
# TODO: Check
|
| 1142 |
+
if self.config.rescale_prenorm_residual:
|
| 1143 |
+
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
|
| 1144 |
+
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
|
| 1145 |
+
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
|
| 1146 |
+
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
|
| 1147 |
+
#
|
| 1148 |
+
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
|
| 1149 |
+
for name, p in module.named_parameters():
|
| 1150 |
+
if name in ["out_proj.weight"]:
|
| 1151 |
+
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
|
| 1152 |
+
# Following Pytorch init, except scale by 1/sqrt(2 * n_layer)
|
| 1153 |
+
# We need to reinit p since this code could be called multiple times
|
| 1154 |
+
# Having just p *= scale would repeatedly scale it down
|
| 1155 |
+
nn.init.kaiming_uniform_(p, a=math.sqrt(5))
|
| 1156 |
+
with torch.no_grad():
|
| 1157 |
+
p /= math.sqrt(self.config.num_hidden_layers)
|
| 1158 |
+
|
| 1159 |
+
|
| 1160 |
+
@dataclass
|
| 1161 |
+
# Copied from transformers.models.mamba.modeling_mamba2.Mamba2Output with MAMBA2->NemotronH,Mamba2->NemotronH
|
| 1162 |
+
class NemotronHOutput(ModelOutput):
|
| 1163 |
+
"""
|
| 1164 |
+
Class for the NemotronH model outputs.
|
| 1165 |
+
|
| 1166 |
+
Args:
|
| 1167 |
+
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
|
| 1168 |
+
Sequence of hidden-states at the output of the last layer of the model.
|
| 1169 |
+
cache_params (`HybridMambaAttentionDynamicCache`):
|
| 1170 |
+
The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
|
| 1171 |
+
avoid providing the old `input_ids`.
|
| 1172 |
+
|
| 1173 |
+
Includes both the State space model state matrices after the selective scan, and the Convolutional states
|
| 1174 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
| 1175 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
|
| 1176 |
+
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
|
| 1177 |
+
|
| 1178 |
+
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
|
| 1179 |
+
"""
|
| 1180 |
+
|
| 1181 |
+
last_hidden_state: Optional[torch.FloatTensor] = None
|
| 1182 |
+
cache_params: Optional[HybridMambaAttentionDynamicCache] = None
|
| 1183 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
| 1184 |
+
attentions: Optional[Tuple[torch.FloatTensor]] = None
|
| 1185 |
+
|
| 1186 |
+
|
| 1187 |
+
@dataclass
|
| 1188 |
+
# Copied from transformers.models.mamba2.modeling_mamba2.MambaCausalLMOutput with Mamba2->NemotronH
|
| 1189 |
+
class NemotronHCausalLMOutput(ModelOutput):
|
| 1190 |
+
"""
|
| 1191 |
+
Base class for causal language model (or autoregressive) outputs.
|
| 1192 |
+
|
| 1193 |
+
Args:
|
| 1194 |
+
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
|
| 1195 |
+
Language modeling loss (for next-token prediction).
|
| 1196 |
+
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
|
| 1197 |
+
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
|
| 1198 |
+
cache_params (`HybridMambaAttentionDynamicCache`):
|
| 1199 |
+
The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
|
| 1200 |
+
avoid providing the old `input_ids`.
|
| 1201 |
+
|
| 1202 |
+
Includes both the State space model state matrices after the selective scan, and the Convolutional states
|
| 1203 |
+
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
|
| 1204 |
+
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
|
| 1205 |
+
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
|
| 1206 |
+
|
| 1207 |
+
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
|
| 1208 |
+
"""
|
| 1209 |
+
|
| 1210 |
+
loss: Optional[torch.FloatTensor] = None
|
| 1211 |
+
logits: Optional[torch.FloatTensor] = None
|
| 1212 |
+
cache_params: Optional[HybridMambaAttentionDynamicCache] = None
|
| 1213 |
+
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
|
| 1214 |
+
attentions: Optional[Tuple[torch.FloatTensor]] = None
|
| 1215 |
+
|
| 1216 |
+
|
| 1217 |
+
NEMOTRONH_START_DOCSTRING = r"""
|
| 1218 |
+
|
| 1219 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
| 1220 |
+
library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
|
| 1221 |
+
etc.)
|
| 1222 |
+
|
| 1223 |
+
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
| 1224 |
+
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
|
| 1225 |
+
and behavior.
|
| 1226 |
+
|
| 1227 |
+
Parameters:
|
| 1228 |
+
config ([`NemotronHConfig`]): Model configuration class with all the parameters of the model.
|
| 1229 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
| 1230 |
+
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
| 1231 |
+
"""
|
| 1232 |
+
|
| 1233 |
+
NEMOTRONH_INPUTS_DOCSTRING = r"""
|
| 1234 |
+
Args:
|
| 1235 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
|
| 1236 |
+
Indices of input sequence tokens in the vocabulary.
|
| 1237 |
+
|
| 1238 |
+
If `cache_params.seqlen_offset>0`, only `input_ids` that do not have their past calculated should be passed as
|
| 1239 |
+
`input_ids`.
|
| 1240 |
+
|
| 1241 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| 1242 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
| 1243 |
+
|
| 1244 |
+
[What are input IDs?](../glossary#input-ids)
|
| 1245 |
+
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
| 1246 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
|
| 1247 |
+
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
|
| 1248 |
+
model's internal embedding lookup matrix.
|
| 1249 |
+
position_ids (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
| 1250 |
+
Indices of positions of each input sequence tokens in the position embeddings.
|
| 1251 |
+
cache_params (`HybridMambaAttentionDynamicCache`, *optional*):
|
| 1252 |
+
If passed along, the model uses the previous state in all the blocks (which will give the output for the
|
| 1253 |
+
`input_ids` provided as if the model added `state_input_ids + input_ids` as context).
|
| 1254 |
+
use_cache (`bool`, *optional*):
|
| 1255 |
+
If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits.
|
| 1256 |
+
output_attentions (`bool`, *optional*):
|
| 1257 |
+
Whether or not to return the attentions tensors of all attention layers.
|
| 1258 |
+
output_hidden_states (`bool`, *optional*):
|
| 1259 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 1260 |
+
more detail.
|
| 1261 |
+
return_dict (`bool`, *optional*):
|
| 1262 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| 1263 |
+
cache_position (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
| 1264 |
+
The position of the current input in the cache. This is used to ensure that the cache is correctly updated.
|
| 1265 |
+
If `cache_params` is passed, `cache_position` should also be passed.
|
| 1266 |
+
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1267 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
| 1268 |
+
|
| 1269 |
+
- 1 for tokens that are **not masked**,
|
| 1270 |
+
- 0 for tokens that are **masked**.
|
| 1271 |
+
|
| 1272 |
+
[What are attention masks?](../glossary#attention-mask)
|
| 1273 |
+
"""
|
| 1274 |
+
|
| 1275 |
+
|
| 1276 |
+
@add_start_docstrings(
|
| 1277 |
+
"The bare NemotronH Model transformer outputting raw hidden-states without any specific head on top.",
|
| 1278 |
+
NEMOTRONH_START_DOCSTRING,
|
| 1279 |
+
)
|
| 1280 |
+
class NemotronHModel(NemotronHPreTrainedModel):
|
| 1281 |
+
def __init__(self, config):
|
| 1282 |
+
super().__init__(config)
|
| 1283 |
+
|
| 1284 |
+
self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
|
| 1285 |
+
self.layers = nn.ModuleList([NemotronHBlock(config, layer_idx=idx) for idx in range(config.num_hidden_layers)])
|
| 1286 |
+
|
| 1287 |
+
self.gradient_checkpointing = False
|
| 1288 |
+
self.norm_f = NemotronHRMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
|
| 1289 |
+
# Initialize weights and apply final processing
|
| 1290 |
+
self._register_load_state_dict_pre_hook(self.load_hook)
|
| 1291 |
+
self.post_init()
|
| 1292 |
+
|
| 1293 |
+
def load_hook(self, state_dict, prefix, *args):
|
| 1294 |
+
for k in state_dict:
|
| 1295 |
+
if "embedding." in k:
|
| 1296 |
+
state_dict[k.replace("embedding.", "embeddings.")] = state_dict.pop(k)
|
| 1297 |
+
break
|
| 1298 |
+
|
| 1299 |
+
def get_input_embeddings(self):
|
| 1300 |
+
return self.embeddings
|
| 1301 |
+
|
| 1302 |
+
def set_input_embeddings(self, new_embeddings):
|
| 1303 |
+
self.embeddings = new_embeddings
|
| 1304 |
+
|
| 1305 |
+
@add_start_docstrings_to_model_forward(NEMOTRONH_INPUTS_DOCSTRING)
|
| 1306 |
+
@add_code_sample_docstrings(
|
| 1307 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 1308 |
+
output_type=NemotronHOutput,
|
| 1309 |
+
config_class=_CONFIG_FOR_DOC,
|
| 1310 |
+
)
|
| 1311 |
+
def forward(
|
| 1312 |
+
self,
|
| 1313 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 1314 |
+
inputs_embeds: Optional[torch.LongTensor] = None,
|
| 1315 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1316 |
+
cache_params: Optional[HybridMambaAttentionDynamicCache] = None,
|
| 1317 |
+
use_cache: Optional[bool] = None,
|
| 1318 |
+
output_attentions: Optional[bool] = None,
|
| 1319 |
+
output_hidden_states: Optional[bool] = None,
|
| 1320 |
+
return_dict: Optional[bool] = None,
|
| 1321 |
+
cache_position: Optional[torch.LongTensor] = None,
|
| 1322 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1323 |
+
**kwargs,
|
| 1324 |
+
) -> Union[Tuple, NemotronHOutput]:
|
| 1325 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 1326 |
+
output_hidden_states = (
|
| 1327 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 1328 |
+
)
|
| 1329 |
+
# use_cache = use_cache if use_cache is not None else self.config.use_cache
|
| 1330 |
+
use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
|
| 1331 |
+
|
| 1332 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1333 |
+
|
| 1334 |
+
if (input_ids is None) ^ (inputs_embeds is not None): # ^ is python for xor
|
| 1335 |
+
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
|
| 1336 |
+
|
| 1337 |
+
if inputs_embeds is None:
|
| 1338 |
+
inputs_embeds = self.embeddings(input_ids)
|
| 1339 |
+
|
| 1340 |
+
if self.gradient_checkpointing and self.training and use_cache:
|
| 1341 |
+
logger.warning_once(
|
| 1342 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
|
| 1343 |
+
)
|
| 1344 |
+
use_cache = False
|
| 1345 |
+
|
| 1346 |
+
# From zamba_modeling.py
|
| 1347 |
+
if use_cache and cache_params is None:
|
| 1348 |
+
logger.warning_once(
|
| 1349 |
+
"NemotronH requires an initialized `NemotronHHybridDynamicCache` to return a cache. None was "
|
| 1350 |
+
"provided, so no cache will be returned."
|
| 1351 |
+
)
|
| 1352 |
+
|
| 1353 |
+
hidden_states = inputs_embeds
|
| 1354 |
+
|
| 1355 |
+
if cache_position is None:
|
| 1356 |
+
cache_position = torch.arange(hidden_states.shape[1], device=hidden_states.device)
|
| 1357 |
+
if position_ids is None:
|
| 1358 |
+
position_ids = cache_position.unsqueeze(0)
|
| 1359 |
+
|
| 1360 |
+
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position)
|
| 1361 |
+
mamba_mask = self._update_mamba_mask(attention_mask, cache_position)
|
| 1362 |
+
|
| 1363 |
+
all_hidden_states = () if output_hidden_states else None
|
| 1364 |
+
all_self_attns = () if output_attentions else None
|
| 1365 |
+
# Until HERE
|
| 1366 |
+
|
| 1367 |
+
for layer_idx, mixer_block in enumerate(self.layers):
|
| 1368 |
+
# Depending on the layer type we opt for 2D base attention mask (Mamba) or 4D causal mask (Attention)
|
| 1369 |
+
if mixer_block.block_type == "mamba":
|
| 1370 |
+
layer_mask = mamba_mask
|
| 1371 |
+
elif mixer_block.block_type == "attention":
|
| 1372 |
+
layer_mask = causal_mask
|
| 1373 |
+
elif mixer_block.block_type == "mlp":
|
| 1374 |
+
layer_mask = None
|
| 1375 |
+
else:
|
| 1376 |
+
raise ValueError(f"Invalid block_type: {self.block_type}")
|
| 1377 |
+
|
| 1378 |
+
if output_hidden_states:
|
| 1379 |
+
all_hidden_states += (hidden_states,)
|
| 1380 |
+
|
| 1381 |
+
if self.gradient_checkpointing and self.training:
|
| 1382 |
+
hidden_states = self._gradient_checkpointing_func(
|
| 1383 |
+
mixer_block.__call__, hidden_states, cache_params, cache_position, layer_mask
|
| 1384 |
+
)
|
| 1385 |
+
else:
|
| 1386 |
+
hidden_states = mixer_block(
|
| 1387 |
+
hidden_states,
|
| 1388 |
+
cache_params=cache_params,
|
| 1389 |
+
cache_position=cache_position,
|
| 1390 |
+
attention_mask=layer_mask,
|
| 1391 |
+
)
|
| 1392 |
+
|
| 1393 |
+
# TODO: Store attentions
|
| 1394 |
+
# if output_attentions:
|
| 1395 |
+
# if layer_outputs[1] is not None:
|
| 1396 |
+
# # append attentions only of attention layers. Mamba layers return `None` as the attention weights
|
| 1397 |
+
# all_self_attns += (layer_outputs[1],)
|
| 1398 |
+
|
| 1399 |
+
# TODO (Check): should it happen before the forward pass?
|
| 1400 |
+
# if output_hidden_states:
|
| 1401 |
+
# all_hidden_states = all_hidden_states + (hidden_states,)
|
| 1402 |
+
|
| 1403 |
+
hidden_states = self.norm_f(hidden_states)
|
| 1404 |
+
|
| 1405 |
+
if output_hidden_states:
|
| 1406 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
| 1407 |
+
|
| 1408 |
+
if not return_dict:
|
| 1409 |
+
return tuple(v for v in [hidden_states, cache_params, all_hidden_states] if v is not None)
|
| 1410 |
+
|
| 1411 |
+
return NemotronHOutput(
|
| 1412 |
+
last_hidden_state=hidden_states,
|
| 1413 |
+
cache_params=cache_params if use_cache else None,
|
| 1414 |
+
hidden_states=all_hidden_states,
|
| 1415 |
+
attentions=all_self_attns,
|
| 1416 |
+
)
|
| 1417 |
+
|
| 1418 |
+
# Copied from transformers.models.jamba.modeling_jamba.JambaModel._update_causal_mask
|
| 1419 |
+
def _update_causal_mask(self, attention_mask, input_tensor, cache_position):
|
| 1420 |
+
if self.config._attn_implementation == "flash_attention_2":
|
| 1421 |
+
if attention_mask is not None and 0.0 in attention_mask:
|
| 1422 |
+
return attention_mask
|
| 1423 |
+
return None
|
| 1424 |
+
|
| 1425 |
+
dtype, device = input_tensor.dtype, input_tensor.device
|
| 1426 |
+
min_dtype = torch.finfo(dtype).min
|
| 1427 |
+
sequence_length = input_tensor.shape[1]
|
| 1428 |
+
target_length = cache_position[-1] + 1
|
| 1429 |
+
|
| 1430 |
+
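# Additive causal mask: min_dtype above the diagonal, 0 elsewhere; expanded to 4D and merged with the 2D padding mask below.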
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
|
| 1431 |
+
if sequence_length != 1:
|
| 1432 |
+
causal_mask = torch.triu(causal_mask, diagonal=1)
|
| 1433 |
+
causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
|
| 1434 |
+
causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
|
| 1435 |
+
if attention_mask is not None:
|
| 1436 |
+
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
|
| 1437 |
+
if attention_mask.dim() == 2:
|
| 1438 |
+
mask_length = attention_mask.shape[-1]
|
| 1439 |
+
padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
|
| 1440 |
+
causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
|
| 1441 |
+
|
| 1442 |
+
if (
|
| 1443 |
+
self.config._attn_implementation == "sdpa"
|
| 1444 |
+
and attention_mask is not None
|
| 1445 |
+
and attention_mask.device.type == "cuda"
|
| 1446 |
+
):
|
| 1447 |
+
# Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
|
| 1448 |
+
# using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
|
| 1449 |
+
# Details: https://github.com/pytorch/pytorch/issues/110213
|
| 1450 |
+
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
|
| 1451 |
+
|
| 1452 |
+
return causal_mask
|
| 1453 |
+
|
| 1454 |
+
def _update_mamba_mask(self, attention_mask, cache_position):
|
| 1455 |
+
"""
|
| 1456 |
+
No need for zeroing states when
|
| 1457 |
+
1. Cached forward
|
| 1458 |
+
2. Attending to all inputs
|
| 1459 |
+
"""
|
| 1460 |
+
mamba_mask = attention_mask
|
| 1461 |
+
if cache_position[0] > 0 or (attention_mask is not None and torch.all(attention_mask == 1)):
|
| 1462 |
+
mamba_mask = None
|
| 1463 |
+
return mamba_mask
|
| 1464 |
+
|
| 1465 |
+
|
| 1466 |
+
@add_start_docstrings(
|
| 1467 |
+
"""
|
| 1468 |
+
The NEMOTRONH Model transformer with a language modeling head on top (linear layer with weights not tied to the input
|
| 1469 |
+
embeddings).
|
| 1470 |
+
""",
|
| 1471 |
+
NEMOTRONH_START_DOCSTRING,
|
| 1472 |
+
)
|
| 1473 |
+
class NemotronHForCausalLM(NemotronHPreTrainedModel, GenerationMixin):
|
| 1474 |
+
_tied_weights_keys = ["lm_head.weight"]
|
| 1475 |
+
|
| 1476 |
+
def __init__(self, config):
|
| 1477 |
+
super().__init__(config)
|
| 1478 |
+
self.backbone = NemotronHModel(config)
|
| 1479 |
+
self.vocab_size = config.vocab_size
|
| 1480 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| 1481 |
+
|
| 1482 |
+
# Initialize weights and apply final processing
|
| 1483 |
+
self.post_init()
|
| 1484 |
+
|
| 1485 |
+
def get_input_embeddings(self):
|
| 1486 |
+
return self.backbone.get_input_embeddings()
|
| 1487 |
+
|
| 1488 |
+
def set_input_embeddings(self, new_embeddings):
|
| 1489 |
+
return self.backbone.set_input_embeddings(new_embeddings)
|
| 1490 |
+
|
| 1491 |
+
def get_output_embeddings(self):
|
| 1492 |
+
return self.lm_head
|
| 1493 |
+
|
| 1494 |
+
def set_output_embeddings(self, new_embeddings):
|
| 1495 |
+
self.lm_head = new_embeddings
|
| 1496 |
+
|
| 1497 |
+
def get_decoder(self):
|
| 1498 |
+
return self.backbone
|
| 1499 |
+
|
| 1500 |
+
def set_decoder(self, decoder):
|
| 1501 |
+
self.backbone = decoder
|
| 1502 |
+
|
| 1503 |
+
def prepare_inputs_for_generation(
|
| 1504 |
+
self,
|
| 1505 |
+
input_ids,
|
| 1506 |
+
past_key_values=None,
|
| 1507 |
+
attention_mask=None,
|
| 1508 |
+
inputs_embeds=None,
|
| 1509 |
+
cache_position=None,
|
| 1510 |
+
position_ids=None,
|
| 1511 |
+
use_cache=True,
|
| 1512 |
+
**kwargs,
|
| 1513 |
+
):
|
| 1514 |
+
# Copy from https://github.com/huggingface/transformers/blob/main/src/transformers/models/jamba/modeling_jamba.py
|
| 1515 |
+
# Overwritten -- uses `cache_params` as opposed to `past_key_values`
|
| 1516 |
+
empty_past_kv = past_key_values is None
|
| 1517 |
+
|
| 1518 |
+
# If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
|
| 1519 |
+
# Exception 1: when passing input_embeds, input_ids may be missing entries
|
| 1520 |
+
# Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
|
| 1521 |
+
# Exception 3: with synced GPUs cache_position may go out of bounds, but we only want dummy token in that case.
|
| 1522 |
+
# (we can't check exception 3 while compiling)
|
| 1523 |
+
if not empty_past_kv:
|
| 1524 |
+
if (
|
| 1525 |
+
inputs_embeds is not None # Exception 1
|
| 1526 |
+
or cache_position[-1] >= input_ids.shape[1] # Exception 3
|
| 1527 |
+
):
|
| 1528 |
+
input_ids = input_ids[:, -cache_position.shape[0] :]
|
| 1529 |
+
elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2)
|
| 1530 |
+
input_ids = input_ids[:, cache_position]
|
| 1531 |
+
else:
|
| 1532 |
+
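# First generation step: allocate a fresh hybrid cache (conv/SSM states for Mamba layers, key/value states for attention layers) for this batch.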
past_key_values = HybridMambaAttentionDynamicCache(
|
| 1533 |
+
self.config, input_ids.shape[0], self.dtype, device=self.device
|
| 1534 |
+
)
|
| 1535 |
+
|
| 1536 |
+
if attention_mask is not None and position_ids is None:
|
| 1537 |
+
# create position_ids on the fly for batch generation
|
| 1538 |
+
position_ids = attention_mask.long().cumsum(-1) - 1
|
| 1539 |
+
position_ids.masked_fill_(attention_mask == 0, 1)
|
| 1540 |
+
if not empty_past_kv:
|
| 1541 |
+
position_ids = position_ids[:, -input_ids.shape[1] :]
|
| 1542 |
+
|
| 1543 |
+
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
|
| 1544 |
+
if inputs_embeds is not None and empty_past_kv:
|
| 1545 |
+
if input_ids is not None and inputs_embeds.shape[1] < input_ids.shape[1]:
|
| 1546 |
+
new_token_embeds = self.get_input_embeddings()(input_ids[:,inputs_embeds.shape[1]:])
|
| 1547 |
+
inputs_embeds = torch.cat([inputs_embeds, new_token_embeds], dim=1)
|
| 1548 |
+
model_inputs = {"inputs_embeds": inputs_embeds}
|
| 1549 |
+
else:
|
| 1550 |
+
model_inputs = {"input_ids": input_ids.contiguous()} # `contiguous()` needed for compilation use cases
|
| 1551 |
+
|
| 1552 |
+
model_inputs.update(
|
| 1553 |
+
{
|
| 1554 |
+
"position_ids": position_ids,
|
| 1555 |
+
"past_key_values": past_key_values,
|
| 1556 |
+
"use_cache": use_cache,
|
| 1557 |
+
"attention_mask": attention_mask,
|
| 1558 |
+
"logits_to_keep": self.config.num_logits_to_keep,
|
| 1559 |
+
"cache_position": cache_position,
|
| 1560 |
+
}
|
| 1561 |
+
)
|
| 1562 |
+
return model_inputs
|
| 1563 |
+
|
| 1564 |
+
@add_start_docstrings_to_model_forward(NEMOTRONH_INPUTS_DOCSTRING)
|
| 1565 |
+
@add_code_sample_docstrings(
|
| 1566 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
| 1567 |
+
output_type=NemotronHCausalLMOutput,
|
| 1568 |
+
config_class=_CONFIG_FOR_DOC,
|
| 1569 |
+
)
|
| 1570 |
+
def forward(
|
| 1571 |
+
self,
|
| 1572 |
+
input_ids: Optional[torch.LongTensor] = None,
|
| 1573 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
| 1574 |
+
position_ids: Optional[torch.LongTensor] = None,
|
| 1575 |
+
cache_params: Optional[HybridMambaAttentionDynamicCache] = None,
|
| 1576 |
+
labels: Optional[torch.LongTensor] = None,
|
| 1577 |
+
output_attentions: Optional[bool] = None,
|
| 1578 |
+
output_hidden_states: Optional[bool] = None,
|
| 1579 |
+
return_dict: Optional[bool] = None,
|
| 1580 |
+
use_cache: Optional[bool] = None,
|
| 1581 |
+
cache_position: Optional[torch.Tensor] = None,
|
| 1582 |
+
attention_mask: Optional[torch.Tensor] = None,
|
| 1583 |
+
**kwargs, # for now we need this for generation
|
| 1584 |
+
) -> Union[Tuple, NemotronHCausalLMOutput]:
|
| 1585 |
+
r"""
|
| 1586 |
+
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| 1587 |
+
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
|
| 1588 |
+
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
|
| 1589 |
+
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
|
| 1590 |
+
"""
|
| 1591 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| 1592 |
+
|
| 1593 |
+
output_hidden_states = (
|
| 1594 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| 1595 |
+
)
|
| 1596 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
| 1597 |
+
|
| 1598 |
+
nemotron_h_outputs = self.backbone(
|
| 1599 |
+
input_ids,
|
| 1600 |
+
cache_params=cache_params,
|
| 1601 |
+
inputs_embeds=inputs_embeds,
|
| 1602 |
+
output_attentions=output_attentions,
|
| 1603 |
+
output_hidden_states=output_hidden_states,
|
| 1604 |
+
return_dict=return_dict,
|
| 1605 |
+
use_cache=use_cache,
|
| 1606 |
+
cache_position=cache_position,
|
| 1607 |
+
attention_mask=attention_mask,
|
| 1608 |
+
)
|
| 1609 |
+
hidden_states = nemotron_h_outputs[0]
|
| 1610 |
+
|
| 1611 |
+
# TODO: Check zamba_modeling.py: https://github.com/huggingface/transformers/blob/d7188ba600e36d3fd191b12e19f1b3bb81a8404f/src/transformers/models/zamba/modeling_zamba.py#L1284C1-L1286C2
|
| 1612 |
+
#logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype)).float()
|
| 1613 |
+
logits = self.lm_head(hidden_states.to(self.lm_head.weight.dtype)).float()
|
| 1614 |
+
|
| 1615 |
+
loss = None
|
| 1616 |
+
if labels is not None:
|
| 1617 |
+
# move labels to correct device to enable model parallelism
|
| 1618 |
+
labels = labels.to(logits.device)
|
| 1619 |
+
# Shift so that tokens < n predict n
|
| 1620 |
+
shift_logits = logits[..., :-1, :].contiguous()
|
| 1621 |
+
shift_labels = labels[..., 1:].contiguous()
|
| 1622 |
+
# Flatten the tokens
|
| 1623 |
+
loss_fct = CrossEntropyLoss()
|
| 1624 |
+
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
|
| 1625 |
+
|
| 1626 |
+
if not return_dict:
|
| 1627 |
+
output = (logits,) + nemotron_h_outputs[1:]
|
| 1628 |
+
return ((loss,) + output) if loss is not None else output
|
| 1629 |
+
|
| 1630 |
+
return NemotronHCausalLMOutput(
|
| 1631 |
+
loss=loss,
|
| 1632 |
+
logits=logits,
|
| 1633 |
+
cache_params=nemotron_h_outputs.cache_params,
|
| 1634 |
+
hidden_states=nemotron_h_outputs.hidden_states,
|
| 1635 |
+
attentions=nemotron_h_outputs.attentions,
|
| 1636 |
+
)
|
nano_v2_inference_chat_template.jinja
ADDED
|
@@ -0,0 +1,125 @@
|
| 1 |
+
{%- set ns = namespace(enable_thinking=true) -%}
|
| 2 |
+
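{#- First pass over the messages: the last /think or /no_think marker in a user/system message toggles reasoning mode for the generation prompt. -#}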
{%- for message in messages -%}
|
| 3 |
+
{%- if message['content'] is string -%}
|
| 4 |
+
{%- if message['role'] == 'user' or message['role'] == 'system' -%}
|
| 5 |
+
{%- if '/think' in message['content'] -%}
|
| 6 |
+
{%- set ns.enable_thinking = true -%}
|
| 7 |
+
{%- elif '/no_think' in message['content'] -%}
|
| 8 |
+
{%- set ns.enable_thinking = false -%}
|
| 9 |
+
{%- endif -%}
|
| 10 |
+
{%- endif -%}
|
| 11 |
+
{%- else -%}
|
| 12 |
+
{%- for content in message['content'] -%}
|
| 13 |
+
{%- if content['type'] == 'text' -%}
|
| 14 |
+
{%- if message['role'] == 'user' or message['role'] == 'system' -%}
|
| 15 |
+
{%- if '/think' in content['text'] -%}
|
| 16 |
+
{%- set ns.enable_thinking = true -%}
|
| 17 |
+
{%- elif '/no_think' in content['text'] -%}
|
| 18 |
+
{%- set ns.enable_thinking = false -%}
|
| 19 |
+
{%- endif -%}
|
| 20 |
+
{%- endif -%}
|
| 21 |
+
{%- endif -%}
|
| 22 |
+
{%- endfor -%}
|
| 23 |
+
{%- endif -%}
|
| 24 |
+
{%- endfor -%}
|
| 25 |
+
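{#- Second pass: render the conversation, stripping the /think and /no_think control markers from the emitted text. -#}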
{%- for message in messages -%}
|
| 26 |
+
{%- if loop.first -%}
|
| 27 |
+
{%- if message['role'] != 'system' -%}
|
| 28 |
+
{{- '<SPECIAL_10>System\n\n' }}
|
| 29 |
+
{%- endif -%}
|
| 30 |
+
{%- endif -%}
|
| 31 |
+
|
| 32 |
+
{%- if message['role'] == 'system' -%}
|
| 33 |
+
{{- '<SPECIAL_10>System\n' }}
|
| 34 |
+
{%- if message['content'] is string -%}
|
| 35 |
+
{{- message['content'].replace('/think', '').replace('/no_think', '').strip() }}
|
| 36 |
+
{%- else -%}
|
| 37 |
+
{%- for content in message['content'] -%}
|
| 38 |
+
{%- if content['type'] == 'image' -%}
|
| 39 |
+
{{- '' }}
|
| 40 |
+
{%- elif content['type'] == 'text' -%}
|
| 41 |
+
{{- content['text'].replace('/think', '').replace('/no_think', '').strip() }}
|
| 42 |
+
{%- endif -%}
|
| 43 |
+
{%- endfor -%}
|
| 44 |
+
{%- endif -%}
|
| 45 |
+
|
| 46 |
+
{%- if tools -%}
|
| 47 |
+
{%- if message['content'].replace('/think', '').replace('/no_think', '').strip() != '' -%}
|
| 48 |
+
{{- '\n\n' }}
|
| 49 |
+
{%- endif -%}
|
| 50 |
+
|
| 51 |
+
{{- 'You can use the following tools to assist the user if required:\n<AVAILABLE_TOOLS>[' }}
|
| 52 |
+
{%- for tool in tools -%}
|
| 53 |
+
{{- (tool.function if tool.function is defined else tool) | tojson -}}
|
| 54 |
+
{{- ', ' if not loop.last else '' -}}
|
| 55 |
+
{%- endfor -%}
|
| 56 |
+
{{- ']</AVAILABLE_TOOLS>\n\nIf you decide to call any tool(s), use the following format:\n<TOOLCALL>[{{\"name\": \"tool_name1\", \"arguments\": \"tool_args1\"}}, {{\"name\": \"tool_name2\", \"arguments\": \"tool_args2\"}}]</TOOLCALL>\n\nThe user will execute tool-calls and return responses from tool(s) in this format:\n<TOOL_RESPONSE>[{{\"tool_response1\"}}, {{\"tool_response2\"}}]</TOOL_RESPONSE>\n\nBased on the tool responses, you can call additional tools if needed, correct tool calls if any errors are found, or just respond to the user.' -}}
|
| 57 |
+
{%- endif -%}
|
| 58 |
+
{{- '\n' -}}
|
| 59 |
+
|
| 60 |
+
{%- elif message['role'] == 'user' -%}
|
| 61 |
+
{{- '<SPECIAL_11>User\n' }}
|
| 62 |
+
{%- if message['content'] is string -%}
|
| 63 |
+
{{- message['content'].replace('/think', '').replace('/no_think', '').strip() }}
|
| 64 |
+
{%- else -%}
|
| 65 |
+
{%- for content in message['content'] -%}
|
| 66 |
+
{%- if content['type'] == 'image' -%}
|
| 67 |
+
{{- '' }}
|
| 68 |
+
{%- elif content['type'] == 'text' -%}
|
| 69 |
+
{{- content['text'].replace('/think', '').replace('/no_think', '').strip() }}
|
| 70 |
+
{%- endif -%}
|
| 71 |
+
{%- endfor -%}
|
| 72 |
+
{%- endif -%}
|
| 73 |
+
{{- '\n' -}}
|
| 74 |
+
|
| 75 |
+
{%- elif message['role'] == 'tool' -%}
|
| 76 |
+
{%- if loop.first or (messages[loop.index0 - 1].role != 'tool') -%}
|
| 77 |
+
{{- '<SPECIAL_11>User\n<TOOL_RESPONSE>[' }}
|
| 78 |
+
{%- endif -%}
|
| 79 |
+
|
| 80 |
+
{{- message.content }}
|
| 81 |
+
{{- ', ' if not loop.last and (messages[loop.index0 + 1].role == 'tool') else '' -}}
|
| 82 |
+
|
| 83 |
+
{%- if loop.last or (messages[loop.index0 + 1].role != 'tool') -%}
|
| 84 |
+
{{- ']</TOOL_RESPONSE>\n' -}}
|
| 85 |
+
{%- endif -%}
|
| 86 |
+
|
| 87 |
+
{%- elif message['role'] == 'assistant' -%}
|
| 88 |
+
{%- set content = message['content'] -%}
{%- if '</think>' in content -%}
|
| 89 |
+
{%- set content = content.split('</think>')[1].strip() -%}
|
| 90 |
+
{%- endif -%}
|
| 91 |
+
|
| 92 |
+
{{- '<SPECIAL_11>Assistant\n' + content.strip() }}
|
| 93 |
+
|
| 94 |
+
{%- if message.tool_calls -%}
|
| 95 |
+
{%- if content.strip() != '' -%}
|
| 96 |
+
{{- '\n\n' -}}
|
| 97 |
+
{%- endif -%}
|
| 98 |
+
|
| 99 |
+
{{- '<TOOLCALL>[' -}}
|
| 100 |
+
{%- for call in message.tool_calls -%}
|
| 101 |
+
{%- set fn = call.function if call.function is defined else call -%}
|
| 102 |
+
{{- '{"name": "' + fn.name + '", "arguments": ' -}}
|
| 103 |
+
{%- if fn.arguments is string -%}
|
| 104 |
+
{{- fn.arguments -}}
|
| 105 |
+
{%- else -%}
|
| 106 |
+
{{- fn.arguments | tojson -}}
|
| 107 |
+
{%- endif -%}
|
| 108 |
+
{{- '}' + (', ' if not loop.last else '') -}}
|
| 109 |
+
{%- endfor -%}
|
| 110 |
+
{{- ']</TOOLCALL>' -}}
|
| 111 |
+
{%- endif -%}
|
| 112 |
+
{{- '\n<SPECIAL_12>\n' -}}
|
| 113 |
+
{%- endif -%}
|
| 114 |
+
{%- endfor -%}
|
| 115 |
+
{%- if not add_generation_prompt is defined -%}
|
| 116 |
+
{%- set add_generation_prompt = false -%}
|
| 117 |
+
{%- endif -%}
|
| 118 |
+
{%- if add_generation_prompt -%}
|
| 119 |
+
{{- '<SPECIAL_11>Assistant\n' }}
|
| 120 |
+
{%- if ns.enable_thinking is defined and ns.enable_thinking is false -%}
|
| 121 |
+
{{- '<think></think>' }}
|
| 122 |
+
{%- else -%}
|
| 123 |
+
{{- '<think>\n' }}
|
| 124 |
+
{%- endif -%}
|
| 125 |
+
{%- endif -%}
|
nano_v2_llm_template.jinja
ADDED
|
@@ -0,0 +1 @@
|
| 1 |
+
{%- for message in messages %}{%- set content = message['content'] %}{%- if message['role'] == 'system' %}{{- '<SPECIAL_10>System\n' + content.replace('/think', '').replace('/no_think', '').strip() }}{%- if tools -%}{%- if content.replace('/think', '').replace('/no_think', '').strip() != '' -%}{{- '\n\n' -}}{%- endif -%}{{- 'You can use the following tools to assist the user if required:\n<AVAILABLE_TOOLS>[' -}}{%- for tool in tools -%}{{- (tool.function if tool.function is defined else tool) | tojson -}}{{- ', ' if not loop.last else '' -}}{%- endfor -%}{{- ']</AVAILABLE_TOOLS>\n\nIf you decide to call any tool(s), use the following format:\n<TOOLCALL>[{{\"name\": \"tool_name1\", \"arguments\": \"tool_args1\"}}, {{\"name\": \"tool_name2\", \"arguments\": \"tool_args2\"}}]</TOOLCALL>\n\nThe user will execute tool-calls and return responses from tool(s) in this format:\n<TOOL_RESPONSE>[{{\"tool_response1\"}}, {{\"tool_response2\"}}]</TOOL_RESPONSE>\n\nBased on the tool responses, you can call additional tools if needed, correct tool calls if any errors are found, or just respond to the user.' -}}{%- endif -%}{{- '\n' -}}{%- elif message['role'] == 'user' %}{{- '<SPECIAL_11>User\n' + content.replace('/think', '').replace('/no_think', '').strip() + '\n' }}{%- elif message['role'] == 'tool' %}{%- if loop.first or (messages[loop.index0 - 1].role != 'tool') -%}{{- '<SPECIAL_11>User\n' + '<TOOL_RESPONSE>[' }}{%- endif -%}{{- message.content -}}{{- ', ' if not loop.last and (messages[loop.index0 + 1].role == 'tool') else '' -}}{%- if loop.last or (messages[loop.index0 + 1].role != 'tool') -%}{{- ']</TOOL_RESPONSE>\n' -}}{%- endif -%}{%- elif message['role'] == 'assistant' %}{%- if '</think>' in content %}{%- set content = content.split('</think>')[1].strip() %}{%- endif %}{{- '<SPECIAL_11>Assistant\n' + content.strip() }}{%- if message.tool_calls -%}{%- if content.strip() != '' -%}{{- '\n\n' -}}{%- endif -%}{{- '<TOOLCALL>[' -}}{%- for call in message.tool_calls -%}{%- set fn = call.function if call.function is defined else call -%}{{- '{\"name\": \"' + fn.name + '\", \"arguments\": ' -}}{%- if fn.arguments is string -%}{{- fn.arguments -}}{%- else -%}{{- fn.arguments | tojson -}}{%- endif -%}{{- '}' + (', ' if not loop.last else '') -}}{%- endfor -%}{{- ']</TOOLCALL>' -}}{%- endif -%}{{- '\n<SPECIAL_12>\n' -}}{%- endif %}{%- endfor %}{%- set ns = namespace(enable_thinking=true) %}{%- for message in messages %}{%- set content = message['content'] %}{%- if message['role'] == 'user' or message['role'] == 'system' %}{%- if '/think' in content %}{%- set ns.enable_thinking = true %}{%- elif '/no_think' in content %}{%- set ns.enable_thinking = false %}{%- endif %}{%- endif %}{%- endfor %}{%- if add_generation_prompt %}{{- '<SPECIAL_11>Assistant\n' }}{%- if ns.enable_thinking is defined and ns.enable_thinking is false %}{{- '<think></think>' }}{%- else %}{{- '<think>\n' }}{%- endif %}{%- endif %}
|
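The single-line LLM template above wires in tool calling (`<AVAILABLE_TOOLS>`, `<TOOLCALL>`, `<TOOL_RESPONSE>`). Below is a hedged sketch of driving it with a recent `transformers` release that supports the `tools` and `chat_template` arguments; the tool schema and repo id are illustrative placeholders:

```python
# Hedged sketch: render the tool-aware LLM-only template with an explicit tool schema.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("<this-repo-id>", trust_remote_code=True)
template = open("nano_v2_llm_template.jinja").read()  # assumes the template file was downloaded locally

tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",  # illustrative tool, not part of the repo
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What's the weather in Santa Clara?"},
]

prompt = tok.apply_chat_template(
    messages, tools=tools, chat_template=template, tokenize=False, add_generation_prompt=True
)
# The rendered prompt embeds the schema inside <AVAILABLE_TOOLS>[...]</AVAILABLE_TOOLS>
# and ends with the assistant header plus an opened <think> block.
print(prompt)
```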
non_reasoning_nano_v2_inference_chat_template.jinja
ADDED
|
@@ -0,0 +1,118 @@
|
| 1 |
+
{%- for message in messages -%}
|
| 2 |
+
{%- if loop.first -%}
|
| 3 |
+
{%- if message['role'] != 'system' -%}
|
| 4 |
+
{{ '<SPECIAL_10>System\n\n' }}
|
| 5 |
+
{%- endif -%}
|
| 6 |
+
{%- endif -%}
|
| 7 |
+
|
| 8 |
+
{%- if message['role'] == 'system' -%}
|
| 9 |
+
{{ '<SPECIAL_10>System\n' }}
|
| 10 |
+
{%- if message['content'] is string -%}
|
| 11 |
+
{{ message['content'].replace('/think', '').replace('/no_think', '').strip() }}
|
| 12 |
+
{%- else -%}
|
| 13 |
+
{%- for content in message['content'] -%}
|
| 14 |
+
{%- if content['type'] == 'image' -%}
|
| 15 |
+
{{ '' }}
|
| 16 |
+
{%- elif content['type'] == 'text' -%}
|
| 17 |
+
{{ content['text'].replace('/think', '').replace('/no_think', '').strip() }}
|
| 18 |
+
{%- endif -%}
|
| 19 |
+
{%- endfor -%}
|
| 20 |
+
{%- endif -%}
|
| 21 |
+
{{ '\n' }}
|
| 22 |
+
|
| 23 |
+
{%- if tools -%}
|
| 24 |
+
{%- if message['content'].replace('/think', '').replace('/no_think', '').strip() != '' -%}
|
| 25 |
+
{{ '\n\n' }}
|
| 26 |
+
{%- endif -%}
|
| 27 |
+
|
| 28 |
+
{{ 'You can use the following tools to assist the user if required:\n<AVAILABLE_TOOLS>[' }}
|
| 29 |
+
{%- for tool in tools -%}
|
| 30 |
+
{{- (tool.function if tool.function is defined else tool) | tojson -}}
|
| 31 |
+
{{ ', ' if not loop.last else '' }}
|
| 32 |
+
{%- endfor -%}
|
| 33 |
+
{{ ']</AVAILABLE_TOOLS>\n\n' }}
|
| 34 |
+
|
| 35 |
+
{{ 'If you decide to call any tool(s), use the following format:\n' }}
|
| 36 |
+
{{ '<TOOLCALL>[{{"name": "tool_name1", "arguments": "tool_args1"}}, {{"name": "tool_name2", "arguments": "tool_args2"}}]</TOOLCALL>\n\n' }}
|
| 37 |
+
|
| 38 |
+
{{ 'The user will execute tool-calls and return responses from tool(s) in this format:\n' }}
|
| 39 |
+
{{ '<TOOL_RESPONSE>[{{"tool_response1"}}, {{"tool_response2"}}]</TOOL_RESPONSE>\n\n' }}
|
| 40 |
+
|
| 41 |
+
{{ 'Based on the tool responses, you can call additional tools if needed, correct tool calls if any errors are found, or just respond to the user.\n' }}
|
| 42 |
+
{%- endif -%}
|
| 43 |
+
|
| 44 |
+
{%- elif message['role'] == 'user' -%}
|
| 45 |
+
{{ '<SPECIAL_11>User\n' }}
|
| 46 |
+
{%- if message['content'] is string -%}
|
| 47 |
+
{{ message['content'].replace('/think', '').replace('/no_think', '').strip() }}
|
| 48 |
+
{%- else -%}
|
| 49 |
+
{%- for content in message['content'] -%}
|
| 50 |
+
{%- if content['type'] == 'image' -%}
|
| 51 |
+
{{ '' }}
|
| 52 |
+
{%- elif content['type'] == 'text' -%}
|
| 53 |
+
{{ content['text'].replace('/think', '').replace('/no_think', '').strip() }}
|
| 54 |
+
{%- endif -%}
|
| 55 |
+
{%- endfor -%}
|
| 56 |
+
{%- endif -%}
|
| 57 |
+
{{ '\n' }}
|
| 58 |
+
|
| 59 |
+
{%- elif message['role'] == 'tool' -%}
|
| 60 |
+
{%- if loop.first or (messages[loop.index0 - 1].role != 'tool') -%}
|
| 61 |
+
{{ '<SPECIAL_11>User\n<TOOL_RESPONSE>[' }}
|
| 62 |
+
{%- endif -%}
|
| 63 |
+
|
| 64 |
+
{{ message.content }}
|
| 65 |
+
{{ ', ' if not loop.last and (messages[loop.index0 + 1].role == 'tool') else '' }}
|
| 66 |
+
|
| 67 |
+
{%- if loop.last or (messages[loop.index0 + 1].role != 'tool') -%}
|
| 68 |
+
{{ ']</TOOL_RESPONSE>\n' }}
|
| 69 |
+
{%- endif -%}
|
| 70 |
+
|
| 71 |
+
{%- elif message['role'] == 'assistant' -%}
|
| 72 |
+
{%- set content = message['content'] -%}{%- if '</think>' in content -%}
|
| 73 |
+
{%- set content = content.split('</think>')[1].strip() -%}
|
| 74 |
+
{%- endif -%}
|
| 75 |
+
|
| 76 |
+
{{ '<SPECIAL_11>Assistant\n' + content.strip() }}
|
| 77 |
+
|
| 78 |
+
{%- if message.tool_calls -%}
|
| 79 |
+
{%- if content.strip() != '' -%}
|
| 80 |
+
{{ '\n\n' }}
|
| 81 |
+
{%- endif -%}
|
| 82 |
+
|
| 83 |
+
{{ '<TOOLCALL>[' }}
|
| 84 |
+
{%- for call in message.tool_calls -%}
|
| 85 |
+
{%- set fn = call.function if call.function is defined else call -%}
|
| 86 |
+
{{ '{"name": "' + fn.name + '", "arguments": ' }}
|
| 87 |
+
{%- if fn.arguments is string -%}
|
| 88 |
+
{{- fn.arguments -}}
|
| 89 |
+
{%- else -%}
|
| 90 |
+
{{- fn.arguments | tojson -}}
|
| 91 |
+
{%- endif -%}
|
| 92 |
+
{{ '}' + (', ' if not loop.last else '') }}
|
| 93 |
+
{%- endfor -%}
|
| 94 |
+
{{ ']</TOOLCALL>' }}
|
| 95 |
+
{%- endif -%}
|
| 96 |
+
{{ '\n<SPECIAL_12>\n' }}
|
| 97 |
+
{%- endif -%}
|
| 98 |
+
{%- endfor -%}
|
| 99 |
+
|
| 100 |
+
{%- set ns = namespace(enable_thinking=true) -%}
|
| 101 |
+
{%- for message in messages -%}
|
| 102 |
+
{%- set content = message['content'] -%}
|
| 103 |
+
{%- if message['role'] == 'user' or message['role'] == 'system' -%}
|
| 104 |
+
{%- if '/think' in content -%}
|
| 105 |
+
{%- set ns.enable_thinking = true -%}
|
| 106 |
+
{%- elif '/no_think' in content -%}
|
| 107 |
+
{%- set ns.enable_thinking = false -%}
|
| 108 |
+
{%- endif -%}
|
| 109 |
+
{%- endif -%}
|
| 110 |
+
{%- endfor -%}
|
| 111 |
+
|
| 112 |
+
{%- if not add_generation_prompt is defined -%}
|
| 113 |
+
{%- set add_generation_prompt = false -%}
|
| 114 |
+
{%- endif -%}
|
| 115 |
+
{%- if add_generation_prompt -%}
|
| 116 |
+
{{ '<SPECIAL_11>Assistant\n' }}
|
| 117 |
+
{{ '<think></think>' }}
|
| 118 |
+
{%- endif -%}
|
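A hedged sketch of a tool-calling round trip through the non-reasoning template above: an assistant turn with `tool_calls` renders a `<TOOLCALL>[...]</TOOLCALL>` block, and consecutive `tool` messages are folded into a single `<TOOL_RESPONSE>[...]</TOOL_RESPONSE>` user turn. Repo id, tool name, and tool output are placeholders:

```python
# Hedged sketch: tool-call and tool-response formatting with the non-reasoning template.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("<this-repo-id>", trust_remote_code=True)
template = open("non_reasoning_nano_v2_inference_chat_template.jinja").read()

messages = [
    {"role": "user", "content": "What's the weather in Santa Clara?"},
    {"role": "assistant", "content": "", "tool_calls": [
        {"function": {"name": "get_weather", "arguments": {"city": "Santa Clara"}}},
    ]},
    {"role": "tool", "content": '{"temperature_c": 21}'},
]

prompt = tok.apply_chat_template(
    messages, chat_template=template, tokenize=False, add_generation_prompt=True
)
# Expected shape of the tail (roughly):
#   <SPECIAL_11>Assistant\n<TOOLCALL>[{"name": "get_weather", "arguments": {"city": "Santa Clara"}}]</TOOLCALL>\n<SPECIAL_12>\n
#   <SPECIAL_11>User\n<TOOL_RESPONSE>[{"temperature_c": 21}]</TOOL_RESPONSE>\n
#   <SPECIAL_11>Assistant\n<think></think>
print(prompt)
```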
preprocessor_config.json
ADDED
|
@@ -0,0 +1,15 @@
|
| 1 |
+
{
|
| 2 |
+
"image_processor_type": "NemotronNanoVLV2ImageProcessor",
|
| 3 |
+
"auto_map": {
|
| 4 |
+
"AutoImageProcessor": "image_processing.NemotronNanoVLV2ImageProcessor",
|
| 5 |
+
"AutoVideoProcessor": "video_processing.NemotronNanoVLV2VideoProcessor",
|
| 6 |
+
"AutoProcessor": "processing.NemotronNanoVLV2Processor"
|
| 7 |
+
},
|
| 8 |
+
"image_size": 512,
|
| 9 |
+
"patch_size": 16,
|
| 10 |
+
"downsample_ratio": 0.5,
|
| 11 |
+
"max_num_tiles": 12,
|
| 12 |
+
"use_thumbnail": true,
|
| 13 |
+
"norm_mean": [0.48145466, 0.4578275, 0.40821073],
|
| 14 |
+
"norm_std": [0.26862954, 0.26130258, 0.27577711]
|
| 15 |
+
}
|
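The `auto_map` entries above register the custom image processor, video processor, and processor classes that ship in this repo, so they can be loaded with `trust_remote_code`. A minimal, hedged sketch (repo id is a placeholder):

```python
# Hedged sketch: load the custom processor registered via auto_map.
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("<this-repo-id>", trust_remote_code=True)
# Tiling-related settings come from this preprocessor_config.json
# (image_size=512, max_num_tiles=12, use_thumbnail=true).
print(processor.image_processor.max_num_tiles)  # 12
```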
privacy.md
ADDED
|
@@ -0,0 +1,13 @@
|
| 1 |
+
Field | Response
|
| 2 |
+
:----------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------
|
| 3 |
+
Generatable or reverse engineerable personal data? | No
|
| 4 |
+
Personal data used to create this model? | No
|
| 5 |
+
Was consent obtained for any personal data used? | Not Applicable
|
| 6 |
+
A description of any methods implemented in data acquisition or processing, if any, to address the prevalence of personal data in the training data, where relevant and applicable. | We used only prompts that do not contain any personal data for synthetic data generation.
|
| 7 |
+
How often is dataset reviewed? | Before release and during dataset creation and model training
|
| 8 |
+
Is there provenance for all datasets used in training? | Yes
|
| 9 |
+
Does data labeling (annotation, metadata) comply with privacy laws? | Yes
|
| 10 |
+
Is data compliant with data subject requests for data correction or removal, if such a request was made? | No, not possible with externally-sourced data.
|
| 11 |
+
Applicable Privacy Policy | [Privacy Policy](https://www.nvidia.com/en-us/about-nvidia/privacy-policy/)
|
| 12 |
+
During AI model development, strict adherence to copyright policy ensured compliance through risk mitigation and legal reviews. Post-data collection, reserved rights content is identified and removed, with verified opt-out processes for rightsholders. Detailed records document due diligence and transparency.
|
| 13 |
+
We employ automated tools and data processing techniques to scan for Personally Identifiable Information (PII) during pre-training, to identify and filter certain categories of personal information, including public-facing contact details such as email addresses and phone numbers. Scans of Common Crawl, CC-News, and Wikimedia datasets did not detect PII in the majority of samples. However, Microsoft Presidio indicated potential findings, including business contact information embedded in natural language, such as email addresses and phone numbers. Verified instances of PII were then removed through a combination of automated filtering and human-in-the-loop validation.
|
processing.py
ADDED
|
@@ -0,0 +1,261 @@
|
| 1 |
+
from typing import Optional, Union, List
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from transformers.feature_extraction_utils import BatchFeature
|
| 6 |
+
from transformers.image_utils import ImageInput
|
| 7 |
+
from transformers.processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack, VideosKwargs
|
| 8 |
+
from transformers.tokenization_utils_base import PreTokenizedInput, TextInput
|
| 9 |
+
from transformers.video_utils import VideoInput
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class NemotronNanoVLV2ImagesKwargs(ImagesKwargs):
|
| 13 |
+
min_pixels: Optional[int]
|
| 14 |
+
max_pixels: Optional[int]
|
| 15 |
+
patch_size: Optional[int]
|
| 16 |
+
temporal_patch_size: Optional[int]
|
| 17 |
+
merge_size: Optional[int]
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class NemotronNanoVLV2ProcessorKwargs(ProcessingKwargs, total=False):
|
| 21 |
+
images_kwargs: NemotronNanoVLV2ImagesKwargs
|
| 22 |
+
videos_kwargs: VideosKwargs
|
| 23 |
+
_defaults = {
|
| 24 |
+
"text_kwargs": {
|
| 25 |
+
"padding": False,
|
| 26 |
+
},
|
| 27 |
+
}
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class NemotronNanoVLV2Processor(ProcessorMixin):
|
| 31 |
+
r"""
|
| 32 |
+
Constructs a Nemotron Nano VL V2 processor which wraps an image processor and a tokenizer into a single processor.
|
| 33 |
+
[`NemotronNanoVLV2Processor`] offers all the functionalities of the image processor and tokenizer. See the
|
| 34 |
+
[`~NemotronNanoVLV2Processor.__call__`] and [`~NemotronNanoVLV2Processor.decode`] for more information.
|
| 35 |
+
Args:
|
| 36 |
+
image_processor ([`AutoImageProcessor`], *optional*):
|
| 37 |
+
The image processor is a required input.
|
| 38 |
+
tokenizer ([`AutoTokenizer`], *optional*):
|
| 39 |
+
The tokenizer is a required input.
|
| 40 |
+
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
|
| 41 |
+
in a chat into a tokenizable string.
|
| 42 |
+
"""
|
| 43 |
+
|
| 44 |
+
attributes = ["image_processor", "tokenizer"]
|
| 45 |
+
|
| 46 |
+
image_processor_class = "AutoImageProcessor"
|
| 47 |
+
video_processor_class = "AutoVideoProcessor"
|
| 48 |
+
tokenizer_class = ("AutoTokenizer")
|
| 49 |
+
|
| 50 |
+
def __init__(self, image_processor=None, tokenizer=None, chat_template=None, **kwargs):
|
| 51 |
+
self.image_token = "<image>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token
|
| 52 |
+
self.video_token = "<video>" if not hasattr(tokenizer, "video_token") else tokenizer.video_token
|
| 53 |
+
self.image_start_token = "<img>" if not hasattr(tokenizer, "image_start_token") else tokenizer.image_start_token
|
| 54 |
+
self.image_end_token = "</img>" if not hasattr(tokenizer, "image_end_token") else tokenizer.image_end_token
|
| 55 |
+
self.image_token_id = (
|
| 56 |
+
tokenizer.image_token_id
|
| 57 |
+
if getattr(tokenizer, "image_token_id", None)
|
| 58 |
+
else tokenizer.convert_tokens_to_ids(self.image_token)
|
| 59 |
+
)
|
| 60 |
+
self.video_token_id = (
|
| 61 |
+
tokenizer.video_token_id
|
| 62 |
+
if getattr(tokenizer, "video_token_id", None)
|
| 63 |
+
else tokenizer.convert_tokens_to_ids(self.video_token)
|
| 64 |
+
)
|
| 65 |
+
super().__init__(image_processor, tokenizer, chat_template=chat_template)
|
| 66 |
+
|
| 67 |
+
def __call__(
|
| 68 |
+
self,
|
| 69 |
+
images: ImageInput = None,
|
| 70 |
+
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
|
| 71 |
+
videos: VideoInput = None,
|
| 72 |
+
**kwargs: Unpack[NemotronNanoVLV2ProcessorKwargs],
|
| 73 |
+
) -> BatchFeature:
|
| 74 |
+
"""
|
| 75 |
+
Main method to prepare multimodal inputs (text, images, videos) for the model. This method processes text by
|
| 76 |
+
replacing image/video tokens with appropriate placeholder sequences, processes images and videos through the
|
| 77 |
+
image processor, and tokenizes the final text.
|
| 78 |
+
|
| 79 |
+
The method performs the following key operations:
|
| 80 |
+
1. Processes images using the image processor to get pixel values and patch counts
|
| 81 |
+
2. Processes videos using the image processor with max_num_tiles=1 to get video pixel values
|
| 82 |
+
3. Replaces `<image>` tokens in text with `<img>` + image tokens + `</img>` sequences
|
| 83 |
+
4. Replaces `<video>` tokens in text with frame-by-frame descriptions including timestamps (if metadata provided)
|
| 84 |
+
5. Tokenizes the processed text and combines all outputs
|
| 85 |
+
|
| 86 |
+
Args:
|
| 87 |
+
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`, *optional*):
|
| 88 |
+
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
|
| 89 |
+
tensor. Both channels-first and channels-last formats are supported.
|
| 90 |
+
text (`str`, `List[str]`, *optional*):
|
| 91 |
+
The sequence or batch of sequences to be encoded. Each sequence should be a string. The text can contain
|
| 92 |
+
special tokens `<image>` and `<video>` that will be replaced with appropriate token sequences.
|
| 93 |
+
videos (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`, *optional*):
|
| 94 |
+
The video or batch of videos to be prepared. Each video should be a 4D NumPy array or PyTorch
|
| 95 |
+
tensor with shape (num_frames, channels, height, width). Both channels-first and channels-last formats
|
| 96 |
+
are supported. Note: Currently only supports batch size of 1 for videos.
|
| 97 |
+
images_kwargs (`Dict`, *optional*):
|
| 98 |
+
Additional keyword arguments for image processing, including:
|
| 99 |
+
- `min_pixels` (`int`, *optional*): Minimum number of pixels for image processing
|
| 100 |
+
- `max_pixels` (`int`, *optional*): Maximum number of pixels for image processing
|
| 101 |
+
- `patch_size` (`int`, *optional*): Size of patches for image processing
|
| 102 |
+
- `temporal_patch_size` (`int`, *optional*): Size of temporal patches
|
| 103 |
+
- `merge_size` (`int`, *optional*): Size for merging patches
|
| 104 |
+
videos_kwargs (`Dict`, *optional*):
|
| 105 |
+
Additional keyword arguments for video processing, including:
|
| 106 |
+
- `video_metadata` (`VideoMetadata`, *optional*): Metadata containing fps information for timestamp calculation
|
| 107 |
+
text_kwargs (`Dict`, *optional*):
|
| 108 |
+
Additional keyword arguments for text tokenization, including:
|
| 109 |
+
- `return_tensors` (`str` or [`~utils.TensorType`], *optional*): Framework for returned tensors ('tf', 'pt', 'np', 'jax')
|
| 110 |
+
- `padding` (`bool`, *optional*): Whether to pad sequences (defaults to False)
|
| 111 |
+
|
| 112 |
+
Returns:
|
| 113 |
+
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
|
| 114 |
+
|
| 115 |
+
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
|
| 116 |
+
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
|
| 117 |
+
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
|
| 118 |
+
`None`).
|
| 119 |
+
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
|
| 120 |
+
- **num_patches** -- Number of patches per image. Returned when `images` is not `None`.
|
| 121 |
+
- **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
|
| 122 |
+
|
| 123 |
+
Raises:
|
| 124 |
+
AssertionError: If videos are provided with batch size > 1 (not currently supported).
|
| 125 |
+
|
| 126 |
+
Note:
|
| 127 |
+
- Image tokens `<image>` in text are replaced with `<img>` + repeated image tokens + `</img>`
|
| 128 |
+
- Video tokens `<video>` in text are replaced with frame-by-frame descriptions
|
| 129 |
+
- When video metadata with fps is provided, frame descriptions include timestamps
|
| 130 |
+
- Videos are processed with max_num_tiles=1 regardless of the images setting
|
| 131 |
+
"""
|
| 132 |
+
output_kwargs = self._merge_kwargs(
|
| 133 |
+
NemotronNanoVLV2ProcessorKwargs,
|
| 134 |
+
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
|
| 135 |
+
**kwargs,
|
| 136 |
+
)
|
| 137 |
+
image_inputs = videos_inputs = {}
|
| 138 |
+
if images is not None:
|
| 139 |
+
image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
|
| 140 |
+
image_num_patches = image_inputs["num_patches"]
|
| 141 |
+
|
| 142 |
+
if videos is not None:
|
| 143 |
+
orig_tiles = self.image_processor.max_num_tiles
|
| 144 |
+
self.image_processor.max_num_tiles = 1
|
| 145 |
+
videos_inputs = self.image_processor(images=videos, **output_kwargs["images_kwargs"])
|
| 146 |
+
self.image_processor.max_num_tiles = orig_tiles
|
| 147 |
+
video_num_patches = [sum(videos_inputs["num_patches"])]
|
| 148 |
+
videos_inputs["pixel_values_videos"] = videos_inputs["pixel_values"]
|
| 149 |
+
del videos_inputs["pixel_values"]
|
| 150 |
+
|
| 151 |
+
if not isinstance(text, list):
|
| 152 |
+
text = [text]
|
| 153 |
+
|
| 154 |
+
text = text.copy() # below lines change text in-place
|
| 155 |
+
if images is not None:
|
| 156 |
+
index = 0
|
| 157 |
+
for i in range(len(text)):
|
| 158 |
+
while self.image_token in text[i]:
|
| 159 |
+
text[i] = text[i].replace(self.image_token, self.image_start_token + "<|placeholder|>" * image_num_patches[index] * self.image_processor.num_image_token + self.image_end_token, 1)
|
| 160 |
+
index += 1
|
| 161 |
+
text[i] = text[i].replace("<|placeholder|>", self.image_token)
|
| 162 |
+
if videos is not None:
|
| 163 |
+
assert len(text) == 1, "Video is not supported for batch size > 1"
|
| 164 |
+
video_metadata = output_kwargs.get("videos_kwargs", {}).get("video_metadata", None)
|
| 165 |
+
i = 0
|
| 166 |
+
index = 0
|
| 167 |
+
if self.video_token in text[i]:
|
| 168 |
+
each_frame = self.image_start_token + "<|placeholder|>" * self.image_processor.num_image_token + self.image_end_token
|
| 169 |
+
video_prompt = "This is a video:\n"
|
| 170 |
+
for j in range(video_num_patches[index]):
|
| 171 |
+
if video_metadata is not None and video_metadata.fps is not None:
|
| 172 |
+
timestamp = j / video_metadata.fps
|
| 173 |
+
video_prompt += f"Frame {j+1} sampled at {timestamp:.2f} seconds: {each_frame}\n"
|
| 174 |
+
else:
|
| 175 |
+
# Fallback to original format without timestamps
|
| 176 |
+
video_prompt += f"Frame {j+1}: {each_frame}\n"
|
| 177 |
+
|
| 178 |
+
text[i] = text[i].replace(self.video_token, video_prompt, 1)
|
| 179 |
+
text[i] = text[i].replace("<|placeholder|>", self.video_token)
|
| 180 |
+
|
| 181 |
+
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
|
| 182 |
+
text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
|
| 183 |
+
return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors)
|
| 184 |
+
|
| 185 |
+
def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs):
|
| 186 |
+
"""
|
| 187 |
+
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
|
| 188 |
+
Args:
|
| 189 |
+
image_sizes (`list[list[int]]`, *optional*):
|
| 190 |
+
The input sizes formatted as (height, width) per each image.
|
| 191 |
+
video_sizes (`list[list[int]]`, *optional*):
|
| 192 |
+
The input sizes formatted as (num_frames, height, width) per each video.
|
| 193 |
+
Returns:
|
| 194 |
+
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
|
| 195 |
+
input modalities, along with other useful data.
|
| 196 |
+
"""
|
| 197 |
+
|
| 198 |
+
vision_data = {}
|
| 199 |
+
if image_sizes is not None:
|
| 200 |
+
images_kwargs = NemotronNanoVLV2ProcessorKwargs._defaults.get("images_kwargs", {})
|
| 201 |
+
images_kwargs.update(kwargs)
|
| 202 |
+
merge_size = images_kwargs.get("merge_size", None) or self.image_processor.merge_size
|
| 203 |
+
|
| 204 |
+
num_image_patches = [
|
| 205 |
+
self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
|
| 206 |
+
for image_size in image_sizes
|
| 207 |
+
]
|
| 208 |
+
num_image_tokens = [(num_patches // merge_size**2) for num_patches in num_image_patches]
|
| 209 |
+
vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
|
| 210 |
+
return MultiModalData(**vision_data)
|
| 211 |
+
|
| 212 |
+
def batch_decode(self, *args, **kwargs):
|
| 213 |
+
"""
|
| 214 |
+
This method forwards all its arguments to the tokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
|
| 215 |
+
refer to the docstring of this method for more information.
|
| 216 |
+
"""
|
| 217 |
+
return self.tokenizer.batch_decode(*args, **kwargs)
|
| 218 |
+
|
| 219 |
+
def decode(self, *args, **kwargs):
|
| 220 |
+
"""
|
| 221 |
+
This method forwards all its arguments to the tokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
|
| 222 |
+
the docstring of this method for more information.
|
| 223 |
+
"""
|
| 224 |
+
return self.tokenizer.decode(*args, **kwargs)
|
| 225 |
+
|
| 226 |
+
def post_process_image_text_to_text(
|
| 227 |
+
self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs
|
| 228 |
+
):
|
| 229 |
+
"""
|
| 230 |
+
Post-process the output of the model to decode the text.
|
| 231 |
+
|
| 232 |
+
Args:
|
| 233 |
+
generated_outputs (`torch.Tensor` or `np.ndarray`):
|
| 234 |
+
The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
|
| 235 |
+
or `(sequence_length,)`.
|
| 236 |
+
skip_special_tokens (`bool`, *optional*, defaults to `True`):
|
| 237 |
+
Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
|
| 238 |
+
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
|
| 239 |
+
Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.
|
| 240 |
+
**kwargs:
|
| 241 |
+
Additional arguments to be passed to the tokenizer's `batch_decode` method.
|
| 242 |
+
|
| 243 |
+
Returns:
|
| 244 |
+
`list[str]`: The decoded text.
|
| 245 |
+
"""
|
| 246 |
+
return self.tokenizer.batch_decode(
|
| 247 |
+
generated_outputs,
|
| 248 |
+
skip_special_tokens=skip_special_tokens,
|
| 249 |
+
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
|
| 250 |
+
**kwargs,
|
| 251 |
+
)
|
| 252 |
+
|
| 253 |
+
@property
|
| 254 |
+
def model_input_names(self):
|
| 255 |
+
tokenizer_input_names = self.tokenizer.model_input_names
|
| 256 |
+
image_processor_input_names = self.image_processor.model_input_names
|
| 257 |
+
names_from_processor = list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
|
| 258 |
+
return names_from_processor + ["second_per_grid_ts"]
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
__all__ = ["NemotronNanoVLV2Processor"]
|
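A minimal, hedged usage sketch for the processor defined above: the `<image>` placeholder in the prompt is expanded to `<img>` + image tokens + `</img>`, and the image itself is tiled by the image processor. File name and repo id are placeholders:

```python
# Hedged sketch: prepare one image plus a text prompt for the model.
from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("<this-repo-id>", trust_remote_code=True)

image = Image.open("example.jpg")           # placeholder image path
text = "<image>\nDescribe this image."      # <image> is the processor's image placeholder token

inputs = processor(images=[image], text=[text], return_tensors="pt")
print(inputs["input_ids"].shape)    # prompt with the expanded image-token sequence
print(inputs["pixel_values"].shape) # stacked tiles produced by the image processor
print(inputs["num_patches"])        # tiles per image (including the thumbnail tile when used)
```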
processing_utils.py
ADDED
|
@@ -0,0 +1,83 @@
|
| 1 |
+
from typing import List, Optional, Union, Any, Dict
|
| 2 |
+
|
| 3 |
+
from PIL import Image
|
| 4 |
+
import torch
|
| 5 |
+
from transformers.image_processing_base import BatchFeature
|
| 6 |
+
from transformers.image_processing_utils_fast import BaseImageProcessorFast, divide_to_patches
|
| 7 |
+
from transformers.image_utils import (make_list_of_images, get_image_size,
|
| 8 |
+
get_image_type, ImageInput, ImageType, ChannelDimension)
|
| 9 |
+
from transformers.utils import TensorType
|
| 10 |
+
import torchvision.transforms as T
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def get_internvl_target_ratios(
|
| 14 |
+
min_num: int,
|
| 15 |
+
max_num: int,
|
| 16 |
+
) -> list[tuple[int, int]]:
|
| 17 |
+
target_ratios = {(i, j)
|
| 18 |
+
for n in range(min_num, max_num + 1)
|
| 19 |
+
for i in range(1, n + 1)
|
| 20 |
+
for j in range(1, n + 1) if min_num <= i * j <= max_num}
|
| 21 |
+
return sorted(target_ratios, key=lambda x: x[0] * x[1])
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
|
| 25 |
+
best_factor = float('-inf')
|
| 26 |
+
best_ratio = (1, 1)
|
| 27 |
+
area = width * height
|
| 28 |
+
for ratio in target_ratios:
|
| 29 |
+
target_aspect_ratio = ratio[0] / ratio[1]
|
| 30 |
+
factor_based_on_area_n_ratio = min(
|
| 31 |
+
(ratio[0]*ratio[1]*image_size*image_size)/ area, 0.6
|
| 32 |
+
)* min(
|
| 33 |
+
target_aspect_ratio/aspect_ratio, aspect_ratio/target_aspect_ratio)
|
| 34 |
+
if factor_based_on_area_n_ratio > best_factor:
|
| 35 |
+
best_factor = factor_based_on_area_n_ratio
|
| 36 |
+
best_ratio = ratio
|
| 37 |
+
return best_ratio
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def calculate_targets(
|
| 41 |
+
orig_width: int,
|
| 42 |
+
orig_height: int,
|
| 43 |
+
target_ratios: list[tuple[int, int]],
|
| 44 |
+
image_size: int,
|
| 45 |
+
) -> tuple[int, int, int]:
|
| 46 |
+
aspect_ratio = orig_width / orig_height
|
| 47 |
+
|
| 48 |
+
# find the closest aspect ratio to the target
|
| 49 |
+
target_aspect_ratio = find_closest_aspect_ratio(
|
| 50 |
+
aspect_ratio,
|
| 51 |
+
target_ratios,
|
| 52 |
+
width=orig_width,
|
| 53 |
+
height=orig_height,
|
| 54 |
+
image_size=image_size,
|
| 55 |
+
)
|
| 56 |
+
|
| 57 |
+
# calculate the target width and height
|
| 58 |
+
target_width = image_size * target_aspect_ratio[0]
|
| 59 |
+
target_height = image_size * target_aspect_ratio[1]
|
| 60 |
+
blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
|
| 61 |
+
|
| 62 |
+
return blocks, target_width, target_height
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def dynamic_preprocess(image, image_size=512, max_num_tiles=12, use_thumbnail=True):
|
| 66 |
+
orig_height, orig_width = get_image_size(image, channel_dim=ChannelDimension.FIRST)
|
| 67 |
+
target_ratios = get_internvl_target_ratios(1, max_num_tiles)
|
| 68 |
+
|
| 69 |
+
blocks, target_width, target_height = calculate_targets(
|
| 70 |
+
orig_width,
|
| 71 |
+
orig_height,
|
| 72 |
+
target_ratios,
|
| 73 |
+
image_size
|
| 74 |
+
)
|
| 75 |
+
# resize the image
|
| 76 |
+
resized_img = T.Resize((target_width, target_height), interpolation=T.InterpolationMode.BICUBIC)(image)
|
| 77 |
+
patches = divide_to_patches(resized_img, image_size)
|
| 78 |
+
assert len(patches) == blocks
|
| 79 |
+
if use_thumbnail and len(patches) != 1:
|
| 80 |
+
thumbnail_img = T.Resize((image_size, image_size), interpolation=T.InterpolationMode.BICUBIC)(image)
|
| 81 |
+
patches.append(thumbnail_img)
|
| 82 |
+
|
| 83 |
+
return patches
|
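A worked example of the tile-selection math above, re-implemented standalone rather than imported so the numbers can be checked by hand: a 1024x512 image with `image_size=512` and `max_num_tiles=12` selects the 2x1 grid (area factor capped at 0.6, perfect aspect match), i.e. two 512x512 tiles, to which `dynamic_preprocess` appends a thumbnail tile when `use_thumbnail=True`:

```python
# Standalone re-computation of get_internvl_target_ratios / find_closest_aspect_ratio.
def best_grid(width, height, image_size=512, max_num_tiles=12):
    ratios = sorted(
        {(i, j)
         for n in range(1, max_num_tiles + 1)
         for i in range(1, n + 1)
         for j in range(1, n + 1)
         if 1 <= i * j <= max_num_tiles},
        key=lambda r: r[0] * r[1],
    )
    aspect = width / height
    area = width * height
    best, best_score = (1, 1), float("-inf")
    for rx, ry in ratios:
        target_aspect = rx / ry
        score = min(rx * ry * image_size * image_size / area, 0.6) \
            * min(target_aspect / aspect, aspect / target_aspect)
        if score > best_score:
            best, best_score = (rx, ry), score
    return best


print(best_grid(1024, 512))  # (2, 1): 2 tiles, plus 1 thumbnail tile from dynamic_preprocess
```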
safety.md
ADDED
|
@@ -0,0 +1,10 @@
|
| 1 |
+
Field | Response
|
| 2 |
+
:---------------------------------------------------|:----------------------------------
|
| 3 |
+
Model Application Field(s): | Customer Service, Media & Entertainment, Enterprise Document Intelligence and Processing & Retail
|
| 4 |
+
Describe the life critical impact (if present). | Not Applicable
|
| 5 |
+
Description of methods implemented in data acquisition or processing, if any, to address other types of potentially harmful data in the training, testing, and validation data: | We used a guard model for content safety to exclude potentially harmful data from training.
|
| 6 |
+
Description of any methods implemented in data acquisition or processing, if any, to address illegal or harmful content in the training data, including, but not limited to, child sexual abuse material (CSAM) and non-consensual intimate imagery (NCII) | We used a Gemma-3 4B-based guard model trained on [Nemotron Content Safety Dataset v2](https://huggingface.co/datasets/nvidia/Aegis-AI-Content-Safety-Dataset-2.0) for content safety to exclude potentially illegal or harmful content from the training. We also did CSAM checks on our image datasets for training.
|
| 7 |
+
Use Case Restrictions: | Use of this model is governed by the [NVIDIA Open Model License Agreement](https://www.nvidia.com/en-us/agreements/enterprise-software/nvidia-open-model-license/)
|
| 8 |
+
Model and dataset restrictions: | The principle of least privilege (PoLP) is applied to limit access for dataset generation and model development. Access restrictions on datasets are enforced during training, and dataset license constraints are adhered to.
|
| 9 |
+
This AI model was developed based on our policies to ensure responsible data handling and risk mitigation. The datasets used for training have been scanned for harmful content and illegal content, consistent with our policies including scanning for Child Sexual Abuse Material (CSAM). Ongoing review and monitoring mechanisms are in place based on our policies and to maintain data integrity.
|
| 10 |
+
The model was optimized explicitly for instruction following and, as a result, is more susceptible to prompt injection and jailbreaking in various forms. It should therefore be paired with additional guardrails or system-level filtering to limit exposure to instructions from malicious sources -- supplied directly or indirectly through retrieval (e.g. via visiting a website) -- as such instructions may yield outputs that lead to harmful, system-level outcomes, up to and including remote code execution in agentic systems, when effective security controls including guardrails are not in place. The model may also generate answers that are inaccurate, omit key information, include irrelevant or redundant text, or are socially unacceptable or undesirable, even if the prompt itself does not include anything explicitly offensive.
|
special_tokens_map.json
ADDED
|
@@ -0,0 +1,23 @@
|
| 1 |
+
{
|
| 2 |
+
"bos_token": {
|
| 3 |
+
"content": "<s>",
|
| 4 |
+
"lstrip": false,
|
| 5 |
+
"normalized": false,
|
| 6 |
+
"rstrip": false,
|
| 7 |
+
"single_word": false
|
| 8 |
+
},
|
| 9 |
+
"eos_token": {
|
| 10 |
+
"content": "<SPECIAL_12>",
|
| 11 |
+
"lstrip": false,
|
| 12 |
+
"normalized": false,
|
| 13 |
+
"rstrip": false,
|
| 14 |
+
"single_word": false
|
| 15 |
+
},
|
| 16 |
+
"unk_token": {
|
| 17 |
+
"content": "<unk>",
|
| 18 |
+
"lstrip": false,
|
| 19 |
+
"normalized": false,
|
| 20 |
+
"rstrip": false,
|
| 21 |
+
"single_word": false
|
| 22 |
+
}
|
| 23 |
+
}
|
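Note that `<SPECIAL_12>` is both the tokenizer's EOS token and the end-of-turn marker emitted by the chat templates, so it is the natural stopping criterion at generation time. A hedged sketch (repo id is a placeholder):

```python
# Hedged sketch: resolve the end-of-turn token id for use as a generation stop token.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("<this-repo-id>", trust_remote_code=True)
eos_id = tok.convert_tokens_to_ids("<SPECIAL_12>")
print(tok.eos_token, eos_id)
# e.g. model.generate(..., eos_token_id=eos_id) stops at the end of the assistant turn.
```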
tokenizer.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:db8e35444fca3a2b98e2c8e927a8f1d8b1ba9d4b349e13ce5aafdb11b6404205
|
| 3 |
+
size 17079976
|
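The entry above is a Git LFS pointer rather than the tokenizer itself; after fetching the real file (e.g. `git lfs pull`), its SHA-256 should match the recorded `oid`. A small verification sketch:

```python
# Hedged sketch: verify a downloaded tokenizer.json against the LFS pointer's oid.
import hashlib

with open("tokenizer.json", "rb") as f:  # assumes the LFS object has been pulled locally
    digest = hashlib.sha256(f.read()).hexdigest()

print(digest == "db8e35444fca3a2b98e2c8e927a8f1d8b1ba9d4b349e13ce5aafdb11b6404205")
```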
tokenizer_config.json
ADDED
|
The diff for this file is too large to render.
|
video_io.py
ADDED
|
@@ -0,0 +1,176 @@
|
| 1 |
+
import os
|
| 2 |
+
import base64
|
| 3 |
+
import mimetypes
|
| 4 |
+
from PIL import Image
|
| 5 |
+
import io
|
| 6 |
+
from transformers.video_utils import VideoMetadata
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def encode_pil_to_jpeg_data_url(pil_image):
|
| 10 |
+
from io import BytesIO
|
| 11 |
+
buf = BytesIO()
|
| 12 |
+
pil_image.save(buf, format="JPEG")
|
| 13 |
+
b64 = base64.b64encode(buf.getvalue()).decode("utf-8")
|
| 14 |
+
return f"data:image/jpeg;base64,{b64}"
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def sample_video_frames_to_data_urls(video_path_local, fps=1, nframe=0, nframe_max=-1):
|
| 18 |
+
"""
|
| 19 |
+
Sample frames from a video and return base64-encoded data URLs along with metadata.
|
| 20 |
+
|
| 21 |
+
Args:
|
| 22 |
+
video_path_local: Path to the video file
|
| 23 |
+
fps: Target frames per second for sampling (if > 0, uses fps-based sampling)
|
| 24 |
+
nframe: Number of frames to sample (used if fps <= 0)
|
| 25 |
+
nframe_max: Maximum number of frames to sample
|
| 26 |
+
|
| 27 |
+
Returns:
|
| 28 |
+
tuple: (frame_data_urls, metadata)
|
| 29 |
+
- frame_data_urls: List of base64-encoded frame images
|
| 30 |
+
- metadata: VideoMetadata dataclass containing info about the sampled frames:
|
| 31 |
+
- total_num_frames: Number of sampled frames
|
| 32 |
+
- fps: Effective frame rate of the sampled frames
|
| 33 |
+
- duration: Duration covered by the sampled frames (in seconds)
|
| 34 |
+
- video_backend: Backend used for video processing ('decord')
|
| 35 |
+
"""
|
| 36 |
+
import numpy as np
|
| 37 |
+
from PIL import Image
|
| 38 |
+
import decord
|
| 39 |
+
|
| 40 |
+
vid = decord.VideoReader(video_path_local)
|
| 41 |
+
total_frames = len(vid)
|
| 42 |
+
video_fps = vid.get_avg_fps()
|
| 43 |
+
total_duration = total_frames / max(1e-6, video_fps)
|
| 44 |
+
|
| 45 |
+
if fps > 0:
|
| 46 |
+
required_frames = int(total_duration * fps)
|
| 47 |
+
desired_frames = max(1, required_frames)
|
| 48 |
+
if nframe_max > 0 and desired_frames > nframe_max:
|
| 49 |
+
desired_frames = nframe_max
|
| 50 |
+
if desired_frames >= total_frames:
|
| 51 |
+
indices = list(range(total_frames))
|
| 52 |
+
elif desired_frames == 1:
|
| 53 |
+
indices = [0] # Always use first frame for single frame sampling
|
| 54 |
+
else:
|
| 55 |
+
# Generate evenly spaced indices and ensure uniqueness
|
| 56 |
+
raw_indices = np.linspace(0, total_frames - 1, desired_frames)
|
| 57 |
+
indices = list(np.unique(np.round(raw_indices).astype(int)))
|
| 58 |
+
else:
|
| 59 |
+
desired_frames = max(1, int(nframe) if nframe and nframe > 0 else 8)
|
| 60 |
+
if nframe_max > 0 and desired_frames > nframe_max:
|
| 61 |
+
desired_frames = nframe_max
|
| 62 |
+
if desired_frames >= total_frames:
|
| 63 |
+
indices = list(range(total_frames))
|
| 64 |
+
elif desired_frames == 1:
|
| 65 |
+
indices = [0] # Always use first frame for single frame sampling
|
| 66 |
+
else:
|
| 67 |
+
# Generate evenly spaced indices and ensure uniqueness
|
| 68 |
+
raw_indices = np.linspace(0, total_frames - 1, desired_frames)
|
| 69 |
+
indices = list(np.unique(np.round(raw_indices).astype(int)))
|
| 70 |
+
|
| 71 |
+
images = [Image.fromarray(vid[i].asnumpy()) for i in indices]
|
| 72 |
+
frame_urls = [encode_pil_to_jpeg_data_url(im) for im in images]
|
| 73 |
+
|
| 74 |
+
# Calculate timestamps for each sampled frame
|
| 75 |
+
timestamps = [float(idx) / video_fps for idx in indices]
|
| 76 |
+
|
| 77 |
+
# Calculate metadata for the sampled frames
|
| 78 |
+
sampled_num_frames = len(indices)
|
| 79 |
+
|
| 80 |
+
# Duration is the time span from first to last frame
|
| 81 |
+
if len(timestamps) > 1:
|
| 82 |
+
sampled_duration = timestamps[-1] - timestamps[0]
|
| 83 |
+
sampled_fps = (sampled_num_frames - 1) / sampled_duration if sampled_duration > 0 else 1.0
|
| 84 |
+
else:
|
| 85 |
+
# Single frame case
|
| 86 |
+
sampled_duration = None
|
| 87 |
+
sampled_fps = None
|
| 88 |
+
|
| 89 |
+
metadata = VideoMetadata(
|
| 90 |
+
total_num_frames=sampled_num_frames,
|
| 91 |
+
fps=sampled_fps,
|
| 92 |
+
duration=sampled_duration,
|
| 93 |
+
video_backend=None,
|
| 94 |
+
)
|
| 95 |
+
|
| 96 |
+
return frame_urls, metadata
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def maybe_path_or_url_to_data_urls(path_or_url, fps=1, nframe=0, nframe_max=-1):
|
| 100 |
+
"""
|
| 101 |
+
Convert a path or URL to data URLs, handling videos, images, and remote files.
|
| 102 |
+
|
| 103 |
+
Args:
|
| 104 |
+
path_or_url: Path or URL to the media file
|
| 105 |
+
fps: Target frames per second for video sampling (if > 0, uses fps-based sampling)
|
| 106 |
+
nframe: Number of frames to sample from video (used if fps <= 0)
|
| 107 |
+
nframe_max: Maximum number of frames to sample
|
| 108 |
+
|
| 109 |
+
Returns:
|
| 110 |
+
tuple: (data_urls, metadata)
|
| 111 |
+
- data_urls: List of base64-encoded data URLs
|
| 112 |
+
- metadata: VideoMetadata dataclass with video metadata or None for images
|
| 113 |
+
"""
|
| 114 |
+
val = str(path_or_url or "")
|
| 115 |
+
low = val.lower()
|
| 116 |
+
|
| 117 |
+
# Handle data URLs
|
| 118 |
+
if low.startswith("data:"):
|
| 119 |
+
if low.startswith("data:video/mp4"):
|
| 120 |
+
header, _, b64part = val.partition(",")
|
| 121 |
+
if not b64part:
|
| 122 |
+
return [val], None
|
| 123 |
+
import tempfile
|
| 124 |
+
tmp = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
|
| 125 |
+
try:
|
| 126 |
+
tmp.write(base64.b64decode(b64part))
|
| 127 |
+
tmp.flush(); tmp.close()
|
| 128 |
+
return sample_video_frames_to_data_urls(tmp.name, fps=fps, nframe=nframe, nframe_max=nframe_max)
|
| 129 |
+
finally:
|
| 130 |
+
try:
|
| 131 |
+
os.unlink(tmp.name)
|
| 132 |
+
except Exception:
|
| 133 |
+
pass
|
| 134 |
+
return [val], None
|
| 135 |
+
|
| 136 |
+
# Remote URL
|
| 137 |
+
if low.startswith("http://") or low.startswith("https://"):
|
| 138 |
+
if low.endswith(".mp4"):
|
| 139 |
+
try:
|
| 140 |
+
import tempfile, urllib.request
|
| 141 |
+
with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpf:
|
| 142 |
+
urllib.request.urlretrieve(val, tmpf.name)
|
| 143 |
+
local_path = tmpf.name
|
| 144 |
+
result = sample_video_frames_to_data_urls(local_path, fps=fps, nframe=nframe, nframe_max=nframe_max)
|
| 145 |
+
try:
|
| 146 |
+
os.unlink(local_path)
|
| 147 |
+
except Exception:
|
| 148 |
+
pass
|
| 149 |
+
return result
|
| 150 |
+
except Exception:
|
| 151 |
+
return [val], None
|
| 152 |
+
return [val], None
|
| 153 |
+
|
| 154 |
+
# Local path
|
| 155 |
+
if os.path.exists(val):
|
| 156 |
+
mime, _ = mimetypes.guess_type(val)
|
| 157 |
+
if mime and mime.startswith("image/"):
|
| 158 |
+
with open(val, "rb") as f:
|
| 159 |
+
b64 = base64.b64encode(f.read()).decode("utf-8")
|
| 160 |
+
return [f"data:{mime};base64,{b64}"], None
|
| 161 |
+
if mime == "video/mp4" or (mime is None and val.endswith(".mp4")):
|
| 162 |
+
return sample_video_frames_to_data_urls(val, fps=fps, nframe=nframe, nframe_max=nframe_max)
|
| 163 |
+
# Fallback: treat as binary image
|
| 164 |
+
with open(val, "rb") as f:
|
| 165 |
+
b64 = base64.b64encode(f.read()).decode("utf-8")
|
| 166 |
+
return [f"data:image/jpeg;base64,{b64}"], None
|
| 167 |
+
|
| 168 |
+
return [val], None
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def pil_image_from_base64(b64_str: str) -> Image.Image:
|
| 172 |
+
# Handle data URLs like "data:image/png;base64,...."
|
| 173 |
+
if b64_str.startswith('data:'):
|
| 174 |
+
b64_str = b64_str.split(',', 1)[1]
|
| 175 |
+
img_bytes = base64.b64decode(b64_str)
|
| 176 |
+
return Image.open(io.BytesIO(img_bytes))
|
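A hedged usage sketch for the helpers above: sample a local clip at roughly 1 fps (capped at 16 frames) and decode the returned data URLs back to PIL images. `clip.mp4` is a placeholder path, and `decord` must be installed:

```python
# Hedged sketch: frame sampling and metadata via video_io.py.
from video_io import maybe_path_or_url_to_data_urls, pil_image_from_base64

frame_urls, metadata = maybe_path_or_url_to_data_urls("clip.mp4", fps=1, nframe_max=16)
frames = [pil_image_from_base64(u) for u in frame_urls]

# metadata is a VideoMetadata instance; fps/duration are None when only one frame is sampled.
print(len(frames), metadata.total_num_frames, metadata.fps, metadata.duration)
```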