# DeepSeek-OCR Colab Requirements
# For local setup only - Google Colab already includes most of these

# Core dependencies
torch>=2.0.0
torchvision>=0.15.0
torchaudio>=2.0.0

# Transformers and tokenizers
transformers==4.46.3
tokenizers==0.20.3

# PDF and image processing
PyMuPDF>=1.23.0
img2pdf>=0.5.0
Pillow>=10.0.0

# Utilities
einops>=0.7.0
easydict>=1.10
addict>=2.4.0
numpy>=1.24.0

# Flash Attention (optional, may require compilation)
# flash-attn==2.7.3

# Additional dependencies
safetensors>=0.4.1
huggingface-hub>=0.23.2

# For Jupyter notebooks (local)
jupyter>=1.0.0
ipywidgets>=8.0.0

# Note: Flash Attention installation can be challenging and may require:
# - CUDA Toolkit 11.8 or 12.1+
# - Matching PyTorch CUDA version
# - C++ compiler
# If flash-attn fails to install, the notebook will fall back to standard attention