#!/usr/bin/env bash
set -euo pipefail
echo "======================================================="
echo " ADUC-SDR — Builder (Triton + Apex + Q8 + FlashAttention + LayerNorm)"
echo "======================================================="
# ---------------------- Persistent caches ----------------------
if [ -d /data ]; then
  export HF_HOME="${HF_HOME:-/data/.cache/huggingface}"
  export TORCH_HOME="${TORCH_HOME:-/data/.cache/torch}"
else
  export HF_HOME="${HF_HOME:-/app/.cache/huggingface}"
  export TORCH_HOME="${TORCH_HOME:-/app/.cache/torch}"
fi
export HF_HUB_CACHE="${HF_HUB_CACHE:-$HF_HOME/hub}"
mkdir -p "$HF_HOME" "$HF_HUB_CACHE" "$TORCH_HOME"
mkdir -p /app/.cache && ln -sf "$HF_HOME" /app/.cache/huggingface
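# The symlink keeps libraries that default to /app/.cache/huggingface pointed
# at the persistent cache chosen above.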
# ---------------------- Wheel configuration ----------------------
mkdir -p /app/wheels /app/wheels/src /app/cuda_cache
chmod -R 777 /app/wheels || true
export CUDA_CACHE_PATH="/app/cuda_cache"
# HF repo for uploading built wheels (optional)
export SELF_HF_REPO_ID="${SELF_HF_REPO_ID:-carlex3321/aduc-sdr}"
export HF_HUB_ENABLE_HF_TRANSFER="${HF_HUB_ENABLE_HF_TRANSFER:-1}"
export HF_HUB_DOWNLOAD_TIMEOUT="${HF_HUB_DOWNLOAD_TIMEOUT:-60}"
# CUDA/torch
export TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-8.9}"
export MAX_JOBS="${MAX_JOBS:-$(nproc)}"
export CUDA_HOME="${CUDA_HOME:-/usr/local/cuda}"
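# Note: arch 8.9 targets Ada Lovelace GPUs (e.g. RTX 4090, L4, L40S); widen
# TORCH_CUDA_ARCH_LIST (e.g. "8.0;8.6;8.9;9.0") to cover other architectures.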
# ---------------------- Build dependencies ----------------------
# Environment tags
PY_TAG="$(python -c 'import sys; print(f"cp{sys.version_info[0]}{sys.version_info[1]}")' 2>/dev/null || echo cp310)"
TORCH_VER="$(python - <<'PY'
try:
    import torch, re
    v = torch.__version__
    print(re.sub(r'\+.*$', '', v))
except Exception:
    print("unknown")
PY
)"
CU_TAG="$(python - <<'PY'
try:
    import torch
    cu = getattr(torch.version, "cuda", None)
    print("cu" + cu.replace(".", "") if cu else "")
except Exception:
    print("")
PY
)"
echo "[env] PY_TAG=${PY_TAG} TORCH_VER=${TORCH_VER} CU_TAG=${CU_TAG}"
# ---------------------- Helpers ----------------------
use_wheel_or_build() {
  local pattern="$1"
  local src="$2"
  local fallback="$3"
  local whl
  whl="$(ls -t /app/wheels/${pattern} 2>/dev/null | head -n1 || true)"
  if [ -n "$whl" ]; then
    echo "📦 Installing found wheel: $whl"
    pip install --no-deps "$whl" || { echo "⚠️ Failed to install wheel $whl, trying fallback..."; eval "$fallback"; }
  else
    echo "⚠️ No wheel found for pattern ${pattern}, running fallback..."
    eval "$fallback"
  fi
}
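# Usage sketch (hypothetical package, for illustration only):
#   use_wheel_or_build "mypkg-*.whl" "/app/wheels/src/mypkg" "pip install mypkg"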
install_from_hf () {
  local PKG="$1"
  python - "$PKG" "$PY_TAG" "$CU_TAG" <<'PY' || return 0
import os, sys
from huggingface_hub import HfApi, hf_hub_download, HfFolder
pkg, py_tag, cu_tag = sys.argv[1], sys.argv[2], sys.argv[3]
repo = os.environ.get("SELF_HF_REPO_ID", "carlex3321/aduc-sdr")
api = HfApi(token=os.getenv("HF_TOKEN") or HfFolder.get_token())
try:
    files = api.list_repo_files(repo_id=repo, repo_type="model")
except Exception:
    raise SystemExit(0)
cands = [f for f in files if f.endswith(".whl") and f.rsplit("/", 1)[-1].startswith(pkg + "-") and py_tag in f]
pref = [f for f in cands if cu_tag and cu_tag in f] or cands
if not pref:
    raise SystemExit(0)
target = sorted(pref, reverse=True)[0]
print(target)
path = hf_hub_download(repo_id=repo, filename=target, repo_type="model", local_dir="/app/wheels")
print(path)
PY
}
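# The last line printed is the downloaded wheel path, so callers can do:
#   WHEEL_PATH="$(install_from_hf flash_attn | tail -n1)"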
hf_upload_wheels_if_available() {
  # Uploads built wheels to the HF repo (optional).
  # Requires HF_TOKEN to be set in the environment.
  local pkg_prefix="$1" # e.g. triton-
  if [ -z "${HF_TOKEN:-}" ]; then
    echo "ℹ️ HF_TOKEN not set; skipping wheel upload for ${pkg_prefix}"
    return 0
  fi
  local wheels
  wheels=$(ls -t /app/wheels/${pkg_prefix}*.whl 2>/dev/null || true)
  if [ -z "$wheels" ]; then
    echo "ℹ️ No ${pkg_prefix}* wheels found to upload."
    return 0
  fi
  echo "☁️ Uploading ${pkg_prefix}* wheels to HF repo ${SELF_HF_REPO_ID} ..."
  python - <<'PY'
import os
from huggingface_hub import HfApi, HfFolder
repo = os.environ.get("SELF_HF_REPO_ID", "carlex3321/aduc-sdr")
token = os.getenv("HF_TOKEN") or HfFolder.get_token()
if not token:
    raise SystemExit(0)
api = HfApi(token=token)
api.upload_folder(
    folder_path="/app/wheels",
    repo_id=repo,
    repo_type="model",
    allow_patterns=["*.whl", "NGC-DL-CONTAINER-LICENSE"],
    ignore_patterns=["**/src/**", "**/*.log", "**/logs/**", ".git/**"],
)
print("Wheel upload complete.")
PY
}
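# Each build_* function below calls this with its wheel prefix
# (triton-, apex-, q8_kernels-, flash_attn-).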
# ---------------------- Checkers ----------------------
check_triton() {
  python - <<'PY'
ok = False
try:
    import triton, triton.language as tl
    ok = hasattr(triton, "__version__") and callable(getattr(tl, "program_id", None))
except Exception:
    ok = False
raise SystemExit(0 if ok else 1)
PY
}
check_apex() {
  python - <<'PY'
try:
    from apex.normalization import FusedLayerNorm, FusedRMSNorm
    import importlib
    importlib.import_module("fused_layer_norm_cuda")
    ok = True
except Exception:
    ok = False
raise SystemExit(0 if ok else 1)
PY
}
check_q8() {
  python - <<'PY'
import importlib.util
spec = importlib.util.find_spec("ltx_q8_kernels") or importlib.util.find_spec("q8_kernels")
raise SystemExit(0 if spec else 1)
PY
}
check_flash() {
  python - <<'PY'
import importlib
ok = False
for name in ("flash_attn_2_cuda", "flash_attn.ops.layer_norm", "flash_attn.layers.layer_norm", "flash_attn"):
    try:
        importlib.import_module(name)
        ok = True
        break
    except Exception:
        pass
raise SystemExit(0 if ok else 1)
PY
}
check_flash_layer_norm_wheel() {
  python - <<'PY'
import importlib
ok = False
# Try the module paths typical of the native fused LayerNorm
for name in ("flash_attn.ops.layer_norm", "flash_attn.layers.layer_norm"):
    try:
        m = importlib.import_module(name)
        ok = True
        break
    except Exception:
        pass
raise SystemExit(0 if ok else 1)
PY
}
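# Each checker exits 0 on success, so they compose with shell conditionals, e.g.:
#   check_triton && echo "triton usable" || echo "triton missing"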
# ---------------------- Builders ----------------------
build_triton() {
  echo "🔧 build_triton — preparing"
  rm -rf /app/wheels/src/triton
  git clone --depth 1 https://github.com/openai/triton.git /app/wheels/src/triton
  # Suggested build accelerators (optional):
  export TRITON_BUILD_WITH_CLANG_LLD=true
  export TRITON_BUILD_WITH_CCACHE=true
  export TRITON_HOME=/app/.triton
  pushd /app/wheels/src/triton >/dev/null
  # NOTE: on some Triton revisions the Python package lives under python/;
  # if this wheel build silently fails (|| true), try building from that subdir.
  pip wheel --no-build-isolation --no-deps . -w /app/wheels || true
  popd >/dev/null
  use_wheel_or_build "triton-*.whl" "/app/wheels/src/triton" "pip install --no-build-isolation /app/wheels/src/triton"
  hf_upload_wheels_if_available "triton-"
  echo "✅ build_triton — OK"
}
build_apex() {
  echo "🔧 build_apex — preparing"
  rm -rf /app/wheels/src/apex
  git clone --depth 1 https://github.com/NVIDIA/apex.git /app/wheels/src/apex
  # Assumption: recent Apex reads APEX_CPP_EXT/APEX_CUDA_EXT to enable the
  # compiled extensions; without them the wheel is Python-only and
  # fused_layer_norm_cuda (required by check_apex) is never built.
  export APEX_CPP_EXT=1 APEX_CUDA_EXT=1
  pushd /app/wheels/src/apex >/dev/null
  pip wheel --no-build-isolation --no-deps . -w /app/wheels || true
  popd >/dev/null
  use_wheel_or_build "apex-*.whl" "/app/wheels/src/apex" "pip install --no-build-isolation /app/wheels/src/apex --global-option=--cpp_ext --global-option=--cuda_ext || pip install --no-build-isolation /app/wheels/src/apex"
  hf_upload_wheels_if_available "apex-"
  echo "✅ build_apex — OK"
}
Q8_REPO="${Q8_REPO:-https://github.com/Lightricks/LTX-Video-Q8-Kernels.git}"
Q8_COMMIT="${Q8_COMMIT:-f3066edea210082799ca5a2bbf9ef0321c5dd8fc}"
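# Both can be overridden per run (hypothetical values, for illustration):
#   Q8_REPO=https://github.com/you/LTX-Video-Q8-Kernels.git Q8_COMMIT=<sha> bash builder.sh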
build_q8 () {
  echo "🔧 build_q8 — preparing"
  local SRC="/app/wheels/src/q8_kernels"
  rm -rf "$SRC"
  git clone --filter=blob:none "$Q8_REPO" "$SRC"
  git -C "$SRC" checkout "$Q8_COMMIT"
  git -C "$SRC" submodule update --init --recursive
  python -m pip wheel --no-build-isolation "$SRC" -w /app/wheels || true
  local W="$(ls -t /app/wheels/q8_kernels-*.whl 2>/dev/null | head -n1 || true)"
  if [ -n "${W}" ]; then
    python -m pip install -U --no-deps "${W}" || true
  else
    python -m pip install --no-build-isolation "$SRC" || true
  fi
  hf_upload_wheels_if_available "q8_kernels-"
  echo "✅ build_q8 — OK"
}
FLASH_ATTENTION_TAG="${FLASH_ATTENTION_TAG:-v2.8.3}"
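# Override per run if needed, e.g. (hypothetical tag): FLASH_ATTENTION_TAG=v2.7.4 bash builder.sh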
build_flash () {
  echo "🔧 build_flashattention — preparing"
  set -e
  local SRC="/app/wheels/src/flash-attn"
  rm -rf "$SRC"
  git clone --depth 1 --branch "$FLASH_ATTENTION_TAG" https://github.com/Dao-AILab/flash-attention.git "$SRC"
  export TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-8.9}"
  export MAX_JOBS="${MAX_JOBS:-$(nproc)}"
  export CUDA_HOME="${CUDA_HOME:-/usr/local/cuda}"
  # 1) Main package
  python -m pip wheel --no-build-isolation --no-deps "$SRC" -w /app/wheels || true
  local W_MAIN="$(ls -t /app/wheels/flash_attn-*.whl 2>/dev/null | head -n1 || true)"
  if [ -n "${W_MAIN}" ]; then
    python -m pip install -U --no-deps "${W_MAIN}" || true
  else
    python -m pip install --no-build-isolation "$SRC" || true
  fi
  # 2) csrc/layer_norm extension — build a wheel and install it
  echo "🔧 flash-attn layer_norm — building wheel from csrc/layer_norm"
  local LN_SRC="$SRC/csrc/layer_norm"
  if [ -d "$LN_SRC" ]; then
    if [ -f "$LN_SRC/setup.py" ] || [ -f "$LN_SRC/pyproject.toml" ]; then
      python -m pip wheel --no-build-isolation --no-deps "$LN_SRC" -w /app/wheels || true
      local W_LN="$(ls -t /app/wheels/*layer*norm*.whl 2>/dev/null | head -n1 || true)"
      if [ -n "${W_LN}" ]; then
        python -m pip install -U --no-deps "${W_LN}" || true
      else
        pip install --no-build-isolation "$LN_SRC" || true
      fi
    else
      # simple fallback when there is no local build metadata
      pip install --no-build-isolation "$LN_SRC" || true
    fi
  else
    echo "ℹ️ Directory $LN_SRC not found at tag $FLASH_ATTENTION_TAG; continuing."
  fi
  hf_upload_wheels_if_available "flash_attn-"
  echo "✅ build_flashattention — OK"
}
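# Note: newer flash-attention tags may no longer ship csrc/layer_norm; in that
# case the guard above just logs the missing directory and moves on.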
build_bitsandbytes() {
  echo "🔧 build_bitsandbytes — installing from PyPI"
  pip install --upgrade --no-cache-dir bitsandbytes || true
  echo "✅ build_bitsandbytes — OK"
}
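# bitsandbytes ships prebuilt CUDA binaries in its PyPI wheels, so unlike the
# packages above it needs no local compilation.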
# ---------------------- Orchestrator ----------------------
ensure_pkg () {
  local PKG="$1"; local CHECK_FN="$2"; local BUILD_FN="$3"
  if ${CHECK_FN}; then
    echo "[flow] ${PKG}: already installed"; return 0
  fi
  echo "[flow] ${PKG}: trying wheel from the Hub (${SELF_HF_REPO_ID})"
  HF_OUT="$(install_from_hf "$PKG" || true)"
  if [ -n "${HF_OUT:-}" ]; then
    WHEEL_PATH="$(printf "%s\n" "${HF_OUT}" | tail -n1)"
    python -m pip install -U --no-build-isolation "${WHEEL_PATH}" || true
    if ${CHECK_FN}; then
      echo "[flow] ${PKG}: success via Hub (${WHEEL_PATH})"; return 0
    fi
  fi
  echo "[flow] ${PKG}: building from source (fallback)"
  ${BUILD_FN} || true
  ${CHECK_FN} || echo "[flow] ${PKG}: still failing after build; continuing"
}
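# Usage: ensure_pkg <pkg> <check_fn> <build_fn>, e.g.:
#   ensure_pkg "flash_attn" check_flash build_flash || true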
# ---------------------- Execution ----------------------
# Recommended order: Triton -> Apex -> Q8 -> FlashAttention (+ layer_norm) -> BitsAndBytes
# Optional cached-first flow (check, then Hub wheel, then source build):
#ensure_pkg "triton" check_triton build_triton || true
#ensure_pkg "apex" check_apex build_apex || true
#ensure_pkg "q8_kernels" check_q8 build_q8 || true
#ensure_pkg "flash_attn" check_flash build_flash || true
build_triton
build_apex
build_q8
build_flash
build_bitsandbytes
echo "======================================================="
echo " Build concluído. Resumo de módulos:"
python - <<'PY'
def check(m):
try:
mod = __import__(m)
print(f" - {m}: OK ({getattr(mod, '__version__', 'unknown')})")
except Exception as e:
print(f" - {m}: FAIL -> {e}")
for m in ["triton", "flash_attn", "bitsandbytes", "transformers", "diffusers"]:
check(m)
try:
from apex.normalization import FusedLayerNorm
print(" - apex.normalization: OK")
except Exception as e:
print(" - apex.normalization: FAIL ->", e)
PY
echo "======================================================="
echo " ✔️ Builder finalizado."