Update server1.py
server1.py  +0 −9

server1.py  CHANGED
@@ -1,5 +1,4 @@
 # app.py
-# comments without accent marks / without enye
 
 import os, io, traceback
 from typing import Optional, List, Tuple
@@ -12,7 +11,6 @@ from functools import lru_cache
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True
 
-# ===== caches (use a dedicated path, writable at runtime) =====
 CACHE_ROOT = os.environ.get("APP_CACHE", "/tmp/appcache")
 os.environ["XDG_CACHE_HOME"] = CACHE_ROOT
 os.environ["HF_HOME"] = os.path.join(CACHE_ROOT, "hf")
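Note on the hunk above: `XDG_CACHE_HOME` and `HF_HOME` are read when the downstream libraries initialize, which is why the file sets them before `import open_clip`. A minimal standalone sketch of the pattern; the model name and pretrained tag are illustrative, not necessarily what server1.py loads:

```python
import os

# point every cache at a runtime-writable path *before* importing open_clip,
# since the libraries resolve HF_HOME / XDG_CACHE_HOME at initialization
CACHE_ROOT = os.environ.get("APP_CACHE", "/tmp/appcache")
os.environ["XDG_CACHE_HOME"] = CACHE_ROOT
os.environ["HF_HOME"] = os.path.join(CACHE_ROOT, "hf")
os.makedirs(os.environ["HF_HOME"], exist_ok=True)

import open_clip  # must come after the environment is adjusted

# illustrative checkpoint; server1.py's actual MODEL_NAME is defined elsewhere
model, _, preprocess = open_clip.create_model_and_transforms(
    "ViT-B-32", pretrained="laion2b_s34b_b79k"
)
```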
@@ -27,8 +25,6 @@ os.makedirs(os.environ["TORCH_HOME"], exist_ok=True)
 import open_clip  # import after adjusting the caches
 
 # ===== basic limits =====
-# by default we keep 1 thread (your baseline). To try more:
-# export NUM_THREADS=4 (or whatever value you want) without touching the code
 NUM_THREADS = int(os.environ.get("NUM_THREADS", "1"))
 torch.set_num_threads(NUM_THREADS)
 os.environ["OMP_NUM_THREADS"] = str(NUM_THREADS)
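The two comments deleted here only documented the override knob; the behavior is unchanged: the default stays at 1 thread and `export NUM_THREADS=4` (or any value) still raises it without code changes. A minimal standalone check of the effective setting, reusing the same variable name:

```python
import os

os.environ.setdefault("NUM_THREADS", "1")      # same default as the code above
n = int(os.environ["NUM_THREADS"])
os.environ["OMP_NUM_THREADS"] = str(n)         # set before torch is imported

import torch

torch.set_num_threads(n)
print(torch.get_num_threads())                 # 1 unless NUM_THREADS was exported
```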
@@ -65,7 +61,6 @@ clip_model = clip_model.to(device=DEVICE, dtype=DTYPE).eval()
 for p in clip_model.parameters():
     p.requires_grad = False
 
-# extract the normalization and target size from the returned preprocess
 normalize = next(t for t in getattr(preprocess, "transforms", []) if isinstance(t, T.Normalize))
 SIZE = next((getattr(t, "size", None) for t in getattr(preprocess, "transforms", []) if hasattr(t, "size")), None)
 if isinstance(SIZE, (tuple, list)):
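`normalize` and `SIZE` above are pulled out of the open_clip `preprocess` pipeline, presumably to rebuild a lighter transform. The `_encode_pil` helper called later in this diff is not shown; the following is a hypothetical sketch of such an encoder built from those pieces. It reuses `T`, `normalize`, `SIZE`, `DEVICE`, `DTYPE`, and `clip_model` from the file; the transform order is an assumption:

```python
def _encode_pil(img: Image.Image) -> torch.Tensor:
    # hypothetical reconstruction; the real body of _encode_pil is outside this diff
    tensor = T.Compose([
        T.Resize(SIZE),
        T.CenterCrop(SIZE),
        T.ToTensor(),
        normalize,                  # the Normalize extracted from preprocess above
    ])(img.convert("RGB")).unsqueeze(0).to(device=DEVICE, dtype=DTYPE)
    with torch.no_grad():
        feat = clip_model.encode_image(tensor)
    return feat / feat.norm(dim=-1, keepdim=True)   # unit norm for cosine scoring
```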
@@ -117,8 +112,6 @@ if model_embeddings.shape[1] != img_dim or version_embeddings.shape[1] != img_di
         f"versions={version_embeddings.shape[1]}. Recompute the embeddings with {MODEL_NAME}."
     )
 
-# ===== lazy cache of sub-embeddings per modelo_full =====
-# does not change accuracy; it only avoids scanning version_labels on every request
 _versions_cache: dict[str, Tuple[List[str], torch.Tensor]] = {}
 
 def _get_versions_subset(modelo_full: str) -> Tuple[List[str], Optional[torch.Tensor]]:
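The deleted comments described `_versions_cache` as a lazy, accuracy-neutral index that avoids rescanning `version_labels` on every request. The function body falls outside this hunk; a hypothetical sketch, assuming each `version_labels` entry carries its `modelo_full` as a prefix:

```python
def _get_versions_subset(modelo_full: str) -> Tuple[List[str], Optional[torch.Tensor]]:
    # hypothetical body; assumes version_labels entries are prefixed by modelo_full
    cached = _versions_cache.get(modelo_full)
    if cached is not None:
        return cached                       # hit: no scan needed
    idxs = [i for i, lbl in enumerate(version_labels) if lbl.startswith(modelo_full)]
    if not idxs:
        return [], None                     # unknown model: nothing to rank
    labels = [version_labels[i] for i in idxs]
    subset = version_embeddings[idxs]       # sub-matrix of precomputed embeddings
    _versions_cache[modelo_full] = (labels, subset)
    return labels, subset
```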
@@ -151,11 +144,9 @@ def _topk_cosine(text_feats: torch.Tensor, text_labels: List[str], img_feat: tor
     return [{"label": text_labels[int(i)], "confidence": round(float(c)*100.0, 2)} for i, c in zip(idxs, conf)]
 
 def process_image_bytes(front_bytes: bytes, back_bytes: Optional[bytes] = None):
-    # back is accepted but ignored entirely
     if not front_bytes or len(front_bytes) < 128:
         raise UnidentifiedImageError("invalid image")
 
-    # front only
     img_front = Image.open(io.BytesIO(front_bytes))
     img_feat = _encode_pil(img_front)
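For reference, the `return` line in this last hunk implies `_topk_cosine` ranks labels by cosine similarity between unit-normalized image and text features. A hypothetical sketch consistent with that return shape; mapping similarities to confidences via softmax is an assumption:

```python
def _topk_cosine(text_feats: torch.Tensor, text_labels: List[str],
                 img_feat: torch.Tensor, k: int = 5):
    # hypothetical body consistent with the return statement shown above
    sims = (img_feat @ text_feats.T).squeeze(0)      # cosine if both are unit-norm
    conf = sims.softmax(dim=-1)                      # assumed confidence mapping
    conf, idxs = conf.topk(min(k, len(text_labels)))
    return [{"label": text_labels[int(i)], "confidence": round(float(c)*100.0, 2)}
            for i, c in zip(idxs, conf)]
```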