# =============================================================================
# NEBULA-X CONFIGURATION FILES
# Francisco Angulo de Lafuente - Agnuxo
# =============================================================================

# requirements.txt
# Core dependencies for NEBULA-X
torch>=2.0.0
transformers>=4.30.0
datasets>=2.14.0
huggingface_hub>=0.16.0
accelerate>=0.21.0

# Configuration and utilities
pyyaml>=6.0
tqdm>=4.65.0

# Scientific computing
numpy>=1.24.0
scipy>=1.10.0
pandas>=2.0.0
scikit-learn>=1.3.0

# Quantum computing
pennylane>=0.32.0
pennylane-lightning>=0.32.0

# GPU acceleration
cupy-cuda12x>=12.0.0  # For CUDA 12.x
pycuda>=2022.2

# Optical and raytracing
pillow>=10.0.0
opencv-python>=4.8.0

# Evolutionary algorithms
deap>=1.4.1

# Networking and P2P
websockets>=11.0
aiohttp>=3.8.0

# Visualization
matplotlib>=3.7.0
seaborn>=0.12.0
plotly>=5.15.0

# Development and testing
pytest>=7.4.0
pytest-asyncio>=0.21.0
black>=23.0.0
flake8>=6.0.0
mypy>=1.5.0

# Documentation
sphinx>=7.1.0
sphinx-rtd-theme>=1.3.0

# Deployment
docker>=6.0.0
gradio>=3.39.0
streamlit>=1.25.0

---

# config.yaml
# Main configuration file for NEBULA-X

model:
  name: "NEBULA-X"
  version: "1.0.0"
  author: "Francisco Angulo de Lafuente (Agnuxo)"
  license: "Apache 2.0"

  # Architecture parameters
  architecture:
    hidden_size: 768
    num_hidden_layers: 12
    num_attention_heads: 12
    intermediate_size: 3072
    max_position_embeddings: 2048
    vocab_size: 50000
    dropout: 0.1
    layer_norm_eps: 1.0e-12  # written with a decimal point so YAML loaders parse it as a float

  # NEBULA-X specific features
  nebula_features:
    holographic_memory:
      enabled: true
      resolution: [256, 256]
      coherence_length: 1000
      interference_threshold: 0.1
      storage_planes: 10

    quantum_processing:
      enabled: true
      qubits_per_neuron: 4
      decoherence_time: 1.0e-6
      quantum_noise_level: 0.01
      error_correction: "basic"

    optical_raytracing:
      enabled: true
      rays_per_neuron: 1000
      max_bounces: 10
      monte_carlo_samples: 10000
      wavelength: 632.8e-9
      use_gpu_acceleration: true

    evolutionary_optimization:
      enabled: true
      population_size: 100
      mutation_rate: 0.1
      crossover_rate: 0.8
      generations: 1000
      selection_method: "tournament"

    p2p_networking:
      enabled: false  # Disabled by default for security
      port: 8080
      max_peers: 50
      sync_interval: 10.0
      encryption: true

training:
  # Training hyperparameters
  learning_rate: 1.0e-4
  batch_size: 32
  gradient_accumulation_steps: 4
  max_epochs: 10
  warmup_steps: 1000
  weight_decay: 0.01
  adam_epsilon: 1.0e-8
  max_grad_norm: 1.0

  # Holographic training specific
  holographic_learning_rate: 5.0e-5
  quantum_adaptation_rate: 1.0e-5
  optical_convergence_threshold: 1.0e-6

  # Checkpointing
  save_steps: 1000
  eval_steps: 500
  logging_steps: 100
  save_total_limit: 3

  # Data
  train_dataset: null
  eval_dataset: null
  max_seq_length: 2048
  preprocessing_num_workers: 4

evaluation:
  # Benchmark configurations
  benchmarks:
    mmlu:
      enabled: true
      num_samples: 1000
      batch_size: 8
      subjects: ["all"]

    gsm8k:
      enabled: true
      num_samples: 500
      batch_size: 4
      chain_of_thought: true

    hellaswag:
      enabled: true
      num_samples: 1000
      batch_size: 8

    arc:
      enabled: true
      num_samples: 500
      batch_size: 8
      challenge_set: true

    humaneval:
      enabled: false  # Resource intensive
      num_samples: 164
      batch_size: 1
      temperature: 0.2

  # Evaluation metrics
  metrics:
    standard: ["accuracy", "f1", "precision", "recall"]
    holographic: ["coherence", "interference_score", "pattern_stability"]
    quantum: ["entanglement_depth", "superposition_utilization", "decoherence_rate"]
    optical: ["raytracing_efficiency", "coherence_length", "photon_utilization"]

hardware:
  # GPU configuration
  gpu:
    device: "cuda"
    mixed_precision: true
    compile_model: true
    memory_fraction: 0.8

  # CPU configuration
  cpu:
    num_workers: 8
    pin_memory: true

  # Specialized hardware
  quantum_simulator:
    backend: "pennylane"
    device: "default.qubit"
    shots: 1024

  raytracing:
    use_rt_cores: true
    use_tensor_cores: true
    cuda_kernels: true

deployment:
  # Hugging Face Hub
  hub:
    model_name: "Agnuxo/NEBULA-X"
    organization: "Agnuxo"
    private: false
    push_to_hub: true
    create_model_card: true

  # API deployment
  api:
    host: "0.0.0.0"
    port: 8000
    workers: 4
    timeout: 300
    max_batch_size: 16

  # Container deployment
  container:
    base_image: "nvidia/cuda:12.2-devel-ubuntu22.04"
    python_version: "3.11"
    expose_port: 8000

logging:
  level: "INFO"
  format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
  file: "nebula_x.log"
  max_bytes: 10485760  # 10MB
  backup_count: 5

# Weights & Biases integration
wandb:
  enabled: false
  project: "nebula-x"
  entity: "agnuxo"
  tags: ["holographic", "quantum", "optical"]
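---

# examples/load_config.py
# Illustrative sketch only, not one of the shipped NEBULA-X files: shows how
# config.yaml above might be loaded and a few settings read. The module path,
# helper name and defaults are assumptions for the example, not the project's
# actual configuration loader.

import os

import yaml


def load_config(path: str = "config.yaml") -> dict:
    """Load the NEBULA-X YAML configuration into a plain dict."""
    with open(path, "r", encoding="utf-8") as fh:
        return yaml.safe_load(fh)


if __name__ == "__main__":
    # NEBULA_X_CONFIG_PATH matches the variable set in docker-compose.yml below.
    config = load_config(os.environ.get("NEBULA_X_CONFIG_PATH", "config.yaml"))

    # Keys mirror the sections defined in config.yaml above.
    arch = config["model"]["architecture"]
    features = config["model"]["nebula_features"]
    print(f"Model: {config['model']['name']} v{config['model']['version']}")
    print(f"Hidden size: {arch['hidden_size']}, layers: {arch['num_hidden_layers']}")
    print(f"Holographic memory enabled: {features['holographic_memory']['enabled']}")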
---

# docker-compose.yml
# Docker Compose configuration for NEBULA-X deployment

version: '3.8'

services:
  nebula-x:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        PYTHON_VERSION: "3.11"
        CUDA_VERSION: "12.2"
    container_name: nebula-x-model
    ports:
      - "8000:8000"
      - "8080:8080"  # P2P networking
    volumes:
      - ./models:/app/models
      - ./data:/app/data
      - ./logs:/app/logs
      - ./checkpoints:/app/checkpoints
    environment:
      - CUDA_VISIBLE_DEVICES=0
      - TOKENIZERS_PARALLELISM=false
      - PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:512
      - NEBULA_X_CONFIG_PATH=/app/config.yaml
      - NEBULA_X_LOG_LEVEL=INFO
    runtime: nvidia
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    depends_on:
      - redis
      - monitoring
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  redis:
    image: redis:7-alpine
    container_name: nebula-x-redis
    ports:
      - "6379:6379"
    volumes:
      - redis_data:/data
    restart: unless-stopped

  monitoring:
    image: prom/prometheus:latest
    container_name: nebula-x-monitoring
    ports:
      - "9090:9090"
    volumes:
      - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml
      - prometheus_data:/prometheus
    restart: unless-stopped

  gradio-demo:
    build:
      context: .
      dockerfile: Dockerfile.demo
    container_name: nebula-x-demo
    ports:
      - "7860:7860"
    environment:
      - NEBULA_X_API_URL=http://nebula-x:8000
    depends_on:
      - nebula-x
    restart: unless-stopped

volumes:
  redis_data:
  prometheus_data:

networks:
  default:
    name: nebula-x-network
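---

# examples/minimal_api_server.py
# Illustrative sketch only: a minimal FastAPI app exposing the /health route
# probed by the docker-compose healthcheck and the Dockerfile HEALTHCHECK on
# port 8000. The real entry point referenced by the Dockerfile CMD
# (nebula_x.api.server) is not shown in this file set; this module, its
# /generate route and the placeholder response are assumptions.

from fastapi import FastAPI
from pydantic import BaseModel
import uvicorn

app = FastAPI(title="NEBULA-X API (sketch)")


class GenerateRequest(BaseModel):
    prompt: str
    max_new_tokens: int = 128


@app.get("/health")
def health() -> dict:
    # The compose healthcheck only needs an HTTP 200 from this route.
    return {"status": "ok"}


@app.post("/generate")
def generate(request: GenerateRequest) -> dict:
    # Placeholder response; a real server would run the NEBULA-X model here.
    return {"prompt": request.prompt, "completion": "", "max_new_tokens": request.max_new_tokens}


if __name__ == "__main__":
    # Host and port match deployment.api in config.yaml and the compose port mapping.
    uvicorn.run(app, host="0.0.0.0", port=8000)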
---

# Dockerfile
# Multi-stage Dockerfile for NEBULA-X deployment

ARG PYTHON_VERSION=3.11
ARG CUDA_VERSION=12.2

# Base stage with CUDA support
FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 AS base

# Re-declare the build arg inside the stage; args declared before FROM are not
# visible in the instructions that follow it
ARG PYTHON_VERSION

# Install system dependencies
RUN apt-get update && apt-get install -y \
    python${PYTHON_VERSION} \
    python${PYTHON_VERSION}-dev \
    python3-pip \
    git \
    curl \
    wget \
    build-essential \
    cmake \
    ninja-build \
    libopenblas-dev \
    liblapack-dev \
    libeigen3-dev \
    libfftw3-dev \
    && rm -rf /var/lib/apt/lists/*

# Set Python as default (-f replaces the distribution's existing python3 link)
RUN ln -sf /usr/bin/python${PYTHON_VERSION} /usr/bin/python
RUN ln -sf /usr/bin/python${PYTHON_VERSION} /usr/bin/python3

# Upgrade pip
RUN python -m pip install --upgrade pip setuptools wheel

# Development stage
FROM base AS development

WORKDIR /app

# Copy requirements first for better Docker layer caching
COPY requirements.txt .
COPY requirements-dev.txt .

# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt
RUN pip install --no-cache-dir -r requirements-dev.txt

# Copy source code
COPY . .

# Install NEBULA-X in development mode
RUN pip install -e .

# Production stage
FROM base AS production

WORKDIR /app

# Create non-root user for security
RUN groupadd -r nebulax && useradd -r -g nebulax nebulax

# Copy only production requirements
COPY requirements.txt .

# Install production dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY --chown=nebulax:nebulax . .

# Install NEBULA-X
RUN pip install --no-cache-dir .

# Create necessary directories
RUN mkdir -p /app/models /app/data /app/logs /app/checkpoints && \
    chown -R nebulax:nebulax /app

# Switch to non-root user
USER nebulax

# Expose ports
EXPOSE 8000 8080

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:8000/health || exit 1

# Default command
CMD ["python", "-m", "nebula_x.api.server", "--host", "0.0.0.0", "--port", "8000"]

---

# Dockerfile.demo
# Dockerfile for Gradio demo interface

FROM python:3.11-slim

WORKDIR /app

# Install system dependencies
RUN apt-get update && apt-get install -y \
    curl \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements
COPY requirements-demo.txt .

# Install dependencies
RUN pip install --no-cache-dir -r requirements-demo.txt

# Copy demo files
COPY demos/ ./demos/
COPY config.yaml .

# Create non-root user
RUN groupadd -r demo && useradd -r -g demo demo
RUN chown -R demo:demo /app
USER demo

# Expose Gradio port
EXPOSE 7860

# Run demo
CMD ["python", "demos/gradio_interface.py"]
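---

# demos/gradio_interface.py (illustrative sketch)
# Dockerfile.demo runs demos/gradio_interface.py, which is not included in
# this file set. The sketch below shows one plausible shape for it: a small
# Gradio UI that forwards prompts to the API container via NEBULA_X_API_URL
# (set in docker-compose.yml). The /generate route and its response fields
# are assumptions carried over from the API sketch above.

import os

import gradio as gr
import requests

API_URL = os.environ.get("NEBULA_X_API_URL", "http://localhost:8000")


def query_model(prompt: str) -> str:
    """Send the prompt to the NEBULA-X API and return the completion text."""
    try:
        response = requests.post(
            f"{API_URL}/generate",
            json={"prompt": prompt, "max_new_tokens": 128},
            timeout=300,
        )
        response.raise_for_status()
        return response.json().get("completion", "")
    except requests.RequestException as exc:
        return f"API request failed: {exc}"


demo = gr.Interface(
    fn=query_model,
    inputs=gr.Textbox(lines=4, label="Prompt"),
    outputs=gr.Textbox(lines=8, label="NEBULA-X output"),
    title="NEBULA-X Demo",
    description="Thin Gradio front-end for the NEBULA-X API container.",
)

if __name__ == "__main__":
    # Port matches EXPOSE 7860 in Dockerfile.demo and the compose mapping.
    demo.launch(server_name="0.0.0.0", server_port=7860)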
---

# .github/workflows/ci.yml
# GitHub Actions CI/CD pipeline

name: NEBULA-X CI/CD

on:
  push:
    branches: [ main, develop ]
  pull_request:
    branches: [ main ]
  release:
    types: [ published ]

env:
  PYTHON_VERSION: "3.11"
  CUDA_VERSION: "12.2"

jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # Versions are quoted so YAML does not read 3.10 as the float 3.1
        python-version: ["3.9", "3.10", "3.11"]

    steps:
      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - name: Cache pip dependencies
        uses: actions/cache@v3
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('requirements*.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install -r requirements-dev.txt

      - name: Lint with flake8
        run: |
          flake8 nebula_x/ --count --select=E9,F63,F7,F82 --show-source --statistics
          flake8 nebula_x/ --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics

      - name: Type check with mypy
        run: |
          mypy nebula_x/

      - name: Test with pytest
        run: |
          pytest tests/ -v --cov=nebula_x --cov-report=xml

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3
        with:
          file: ./coverage.xml
          flags: unittests
          name: codecov-umbrella

  test-gpu:
    runs-on: [self-hosted, gpu]
    # Runs on pushes to main and on releases, so deploy-hub's "needs" can be satisfied
    if: (github.event_name == 'push' && github.ref == 'refs/heads/main') || github.event_name == 'release'

    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install -r requirements-dev.txt

      - name: Test GPU functionality
        run: |
          pytest tests/test_gpu/ -v -m gpu

      - name: Run benchmarks
        run: |
          python -m nebula_x.benchmarks.run_benchmarks --quick

  build-docker:
    runs-on: ubuntu-latest
    needs: test

    steps:
      - uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Login to DockerHub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: agnuxo/nebula-x
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}

      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          target: production
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

  deploy-hub:
    runs-on: ubuntu-latest
    needs: [test, test-gpu]
    if: github.event_name == 'release'

    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install huggingface_hub

      - name: Deploy to Hugging Face Hub
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          python scripts/deploy_to_hub.py \
            --model-name Agnuxo/NEBULA-X \
            --version ${{ github.ref_name }}

---

# .gitignore
# Git ignore file for NEBULA-X project

# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyTorch
*.pth
*.pt
*.bin
*.safetensors

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# Virtual environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# IDE
.vscode/
.idea/
*.swp
*.swo
*~

# OS
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db

# Project specific
models/
checkpoints/
data/
logs/
outputs/
cache/
wandb/
benchmark_reports/
*.log

# Docker
.dockerignore

# Secrets
.env.local
.env.production
secrets.yaml
api_keys.txt

# Large files
*.h5
*.hdf5
*.pickle
*.pkl
*.npy
*.npz

# Temporary files
tmp/
temp/
.tmp/

# Coverage
.coverage
.pytest_cache/
htmlcov/
.tox/
.nox/
.coverage.*

# Documentation builds
docs/_build/
docs/build/
site/

---

# requirements-dev.txt
# Development dependencies

# Testing
pytest>=7.4.0
pytest-asyncio>=0.21.0
pytest-cov>=4.1.0
pytest-mock>=3.11.0
pytest-xdist>=3.3.0

# Code quality
black>=23.0.0
isort>=5.12.0
flake8>=6.0.0
mypy>=1.5.0
pre-commit>=3.3.0

# Documentation
sphinx>=7.1.0
sphinx-rtd-theme>=1.3.0
myst-parser>=2.0.0

# Debugging
ipdb>=0.13.0
pdbpp>=0.10.0

# Profiling
line_profiler>=4.1.0
memory_profiler>=0.61.0

# Jupyter
jupyter>=1.0.0
jupyterlab>=4.0.0
ipywidgets>=8.0.0

---

# requirements-demo.txt
# Dependencies for demo applications
gradio>=3.39.0
streamlit>=1.25.0
fastapi>=0.100.0
uvicorn[standard]>=0.23.0
requests>=2.31.0
pillow>=10.0.0
matplotlib>=3.7.0
plotly>=5.15.0
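---

# scripts/deploy_to_hub.py (illustrative sketch)
# The deploy-hub job in .github/workflows/ci.yml calls this script, which is
# not included in this file set. The sketch below shows one plausible
# implementation with huggingface_hub: create the repo if needed and upload
# the exported model directory. The --model-dir default and the commit
# message format are assumptions for the example.

import argparse
import os

from huggingface_hub import HfApi


def main() -> None:
    parser = argparse.ArgumentParser(description="Upload NEBULA-X artifacts to the Hugging Face Hub")
    parser.add_argument("--model-name", required=True, help="Target repo, e.g. Agnuxo/NEBULA-X")
    parser.add_argument("--version", required=True, help="Release tag used in the commit message")
    parser.add_argument("--model-dir", default="models/export", help="Local directory to upload")
    args = parser.parse_args()

    # HF_TOKEN is provided by the workflow from repository secrets.
    api = HfApi(token=os.environ["HF_TOKEN"])
    api.create_repo(repo_id=args.model_name, repo_type="model", exist_ok=True)
    api.upload_folder(
        folder_path=args.model_dir,
        repo_id=args.model_name,
        repo_type="model",
        commit_message=f"Release {args.version}",
    )
    print(f"Uploaded {args.model_dir} to https://huggingface.co/{args.model_name}")


if __name__ == "__main__":
    main()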
Beta", "Intended Audience :: Science/Research", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Topic :: Scientific/Engineering :: Physics", "Topic :: Software Development :: Libraries :: Python Modules", ], python_requires=">=3.9", install_requires=requirements, extras_require={ "dev": [ "pytest>=7.4.0", "black>=23.0.0", "flake8>=6.0.0", "mypy>=1.5.0", ], "docs": [ "sphinx>=7.1.0", "sphinx-rtd-theme>=1.3.0", ], "demo": [ "gradio>=3.39.0", "streamlit>=1.25.0", ], }, entry_points={ "console_scripts": [ "nebula-x=nebula_x.cli:main", "nebula-x-benchmark=nebula_x.benchmarks.cli:main", "nebula-x-train=nebula_x.training.cli:main", "nebula-x-serve=nebula_x.api.server:main", ], }, include_package_data=True, package_data={ "nebula_x": [ "config/*.yaml", "data/*.json", "templates/*.html", ], }, keywords=[ "artificial intelligence", "holographic neural networks", "quantum computing", "optical computing", "transformer", "deep learning", "machine learning", "neural networks", "raytracing", "photonic computing", ], project_urls={ "Bug Reports": "https://github.com/Agnuxo1/NEBULA-X/issues", "Source": "https://github.com/Agnuxo1/NEBULA-X", "Documentation": "https://nebula-x.readthedocs.io/", "Hugging Face": "https://huggingface.co/Agnuxo/NEBULA-X", }, ) --- # pyproject.toml # Modern Python project configuration [build-system] requires = ["setuptools>=61.0", "wheel"] build-backend = "setuptools.build_meta" [project] name = "nebula-x" version = "1.0.0" description = "Enhanced Unified Holographic Neural Network with Quantum Processing" readme = "README.md" license = {text = "Apache-2.0"} authors = [ {name = "Francisco Angulo de Lafuente", email = "contact@agnuxo.com"} ] maintainers = [ {name = "Francisco Angulo de Lafuente", email = "contact@agnuxo.com"} ] keywords = [ "artificial intelligence", "holographic neural networks", "quantum computing", "optical computing", "transformer", "deep learning" ] classifiers = [ "Development Status :: 4 - Beta", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Topic :: Scientific/Engineering :: Artificial Intelligence", ] requires-python = ">=3.9" dependencies = [ "torch>=2.0.0", "transformers>=4.30.0", "datasets>=2.14.0", "huggingface_hub>=0.16.0", "numpy>=1.24.0", "scipy>=1.10.0", "pandas>=2.0.0", "pillow>=10.0.0", "pyyaml>=6.0", "tqdm>=4.65.0", ] [project.optional-dependencies] quantum = ["pennylane>=0.32.0"] gpu = ["cupy-cuda12x>=12.0.0", "pycuda>=2022.2"] viz = ["matplotlib>=3.7.0", "seaborn>=0.12.0", "plotly>=5.15.0"] dev = [ "pytest>=7.4.0", "black>=23.0.0", "isort>=5.12.0", "flake8>=6.0.0", "mypy>=1.5.0", "pre-commit>=3.3.0", ] docs = [ "sphinx>=7.1.0", "sphinx-rtd-theme>=1.3.0", "myst-parser>=2.0.0", ] demo = [ "gradio>=3.39.0", "streamlit>=1.25.0", "fastapi>=0.100.0", "uvicorn[standard]>=0.23.0", ] [project.scripts] nebula-x = "nebula_x.cli:main" nebula-x-benchmark = "nebula_x.benchmarks.cli:main" nebula-x-train = "nebula_x.training.cli:main" nebula-x-serve = "nebula_x.api.server:main" [project.urls] Homepage = "https://github.com/Agnuxo1/NEBULA-X" 
---

# pyproject.toml
# Modern Python project configuration

[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "nebula-x"
version = "1.0.0"
description = "Enhanced Unified Holographic Neural Network with Quantum Processing"
readme = "README.md"
license = {text = "Apache-2.0"}
authors = [
    {name = "Francisco Angulo de Lafuente", email = "contact@agnuxo.com"}
]
maintainers = [
    {name = "Francisco Angulo de Lafuente", email = "contact@agnuxo.com"}
]
keywords = [
    "artificial intelligence",
    "holographic neural networks",
    "quantum computing",
    "optical computing",
    "transformer",
    "deep learning"
]
classifiers = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Science/Research",
    "License :: OSI Approved :: Apache Software License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
]
requires-python = ">=3.9"
dependencies = [
    "torch>=2.0.0",
    "transformers>=4.30.0",
    "datasets>=2.14.0",
    "huggingface_hub>=0.16.0",
    "numpy>=1.24.0",
    "scipy>=1.10.0",
    "pandas>=2.0.0",
    "pillow>=10.0.0",
    "pyyaml>=6.0",
    "tqdm>=4.65.0",
]

[project.optional-dependencies]
quantum = ["pennylane>=0.32.0"]
gpu = ["cupy-cuda12x>=12.0.0", "pycuda>=2022.2"]
viz = ["matplotlib>=3.7.0", "seaborn>=0.12.0", "plotly>=5.15.0"]
dev = [
    "pytest>=7.4.0",
    "black>=23.0.0",
    "isort>=5.12.0",
    "flake8>=6.0.0",
    "mypy>=1.5.0",
    "pre-commit>=3.3.0",
]
docs = [
    "sphinx>=7.1.0",
    "sphinx-rtd-theme>=1.3.0",
    "myst-parser>=2.0.0",
]
demo = [
    "gradio>=3.39.0",
    "streamlit>=1.25.0",
    "fastapi>=0.100.0",
    "uvicorn[standard]>=0.23.0",
]

[project.scripts]
nebula-x = "nebula_x.cli:main"
nebula-x-benchmark = "nebula_x.benchmarks.cli:main"
nebula-x-train = "nebula_x.training.cli:main"
nebula-x-serve = "nebula_x.api.server:main"

[project.urls]
Homepage = "https://github.com/Agnuxo1/NEBULA-X"
Repository = "https://github.com/Agnuxo1/NEBULA-X"
Documentation = "https://nebula-x.readthedocs.io/"
"Bug Tracker" = "https://github.com/Agnuxo1/NEBULA-X/issues"
"Hugging Face" = "https://huggingface.co/Agnuxo/NEBULA-X"

[tool.setuptools]
package-dir = {"" = "."}

[tool.setuptools.packages.find]
exclude = ["tests*", "docs*", "examples*"]

[tool.black]
line-length = 88
target-version = ['py39', 'py310', 'py311']
include = '\.pyi?$'
extend-exclude = '''
/(
  # directories
  \.eggs
  | \.git
  | \.hg
  | \.mypy_cache
  | \.tox
  | \.venv
  | build
  | dist
)/
'''

[tool.isort]
profile = "black"
multi_line_output = 3
line_length = 88
known_first_party = ["nebula_x"]

[tool.mypy]
python_version = "3.9"
warn_return_any = true
warn_unused_configs = true
disallow_untyped_defs = false
disallow_incomplete_defs = false
check_untyped_defs = true
disallow_untyped_decorators = false
no_implicit_optional = true
warn_redundant_casts = true
warn_unused_ignores = true
warn_no_return = true
warn_unreachable = true
strict_equality = true

[[tool.mypy.overrides]]
module = [
    "cupy.*",
    "pycuda.*",
    "pennylane.*",
    "deap.*",
    "cv2.*",
]
ignore_missing_imports = true

[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py", "*_test.py"]
python_functions = ["test_*"]
python_classes = ["Test*"]
addopts = [
    "--strict-markers",
    "--strict-config",
    "--verbose",
    "--tb=short",
    "--cov=nebula_x",
    "--cov-report=term-missing",
    "--cov-report=html",
    "--cov-report=xml",
]
markers = [
    "slow: marks tests as slow (deselect with '-m \"not slow\"')",
    "gpu: marks tests that require GPU",
    "quantum: marks tests that require quantum simulation",
    "integration: marks tests as integration tests",
    "benchmark: marks tests as benchmark tests",
]
filterwarnings = [
    "ignore::UserWarning",
    "ignore::DeprecationWarning",
]

[tool.coverage.run]
source = ["nebula_x"]
omit = [
    "*/tests/*",
    "*/test_*",
    "setup.py",
    "*/venv/*",
    "*/.venv/*",
]

[tool.coverage.report]
exclude_lines = [
    "pragma: no cover",
    "def __repr__",
    "if self.debug:",
    "if settings.DEBUG",
    "raise AssertionError",
    "raise NotImplementedError",
    "if 0:",
    "if __name__ == .__main__.:",
    "class .*\\bProtocol\\):",
    "@(abc\\.)?abstractmethod",
]

# Note: plain flake8 does not read pyproject.toml; this section requires the
# pyproject-flake8 (pflake8) wrapper, or can be moved to setup.cfg / .flake8.
[tool.flake8]
max-line-length = 88
extend-ignore = ["E203", "E501", "W503"]
max-complexity = 15
exclude = [
    ".git",
    "__pycache__",
    "build",
    "dist",
    ".eggs",
    "*.egg-info",
    ".venv",
    "venv",
]
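---

# tests/test_markers_example.py (illustrative sketch)
# Shows how the custom pytest markers declared in pyproject.toml
# ([tool.pytest.ini_options].markers) are meant to be used. With
# --strict-markers enabled, only the declared markers are accepted. The test
# bodies are placeholders, not real NEBULA-X tests.

import pytest


@pytest.mark.gpu
def test_requires_gpu() -> None:
    # In CI, GPU-marked tests are selected on the self-hosted runner with: pytest -m gpu
    torch = pytest.importorskip("torch")
    if not torch.cuda.is_available():
        pytest.skip("CUDA device not available")
    assert torch.cuda.device_count() >= 1


@pytest.mark.slow
@pytest.mark.quantum
def test_quantum_simulation_placeholder() -> None:
    # Deselect slow tests locally with: pytest -m "not slow"
    pennylane = pytest.importorskip("pennylane")
    dev = pennylane.device("default.qubit", wires=1)
    assert dev is not None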