Musadiq Gilal
committed on
Commit
·
215b631
1
Parent(s):
663a305
Add FastAPI Docker backend for know-your-skin demo
Browse files- Dockerfile +23 -0
- api/__init__.py +5 -0
- api/__pycache__/__init__.cpython-312.pyc +0 -0
- api/__pycache__/server.cpython-312.pyc +0 -0
- api/server.py +127 -0
- app.py +3 -0
- config/__init__.py +5 -0
- config/__pycache__/__init__.cpython-312.pyc +0 -0
- config/__pycache__/concerns.cpython-312.pyc +0 -0
- config/__pycache__/cosmetic_copy.cpython-312.pyc +0 -0
- config/__pycache__/cosmetic_targets.cpython-312.pyc +0 -0
- config/__pycache__/hudson_products.cpython-312.pyc +0 -0
- config/concerns.py +224 -0
- config/cosmetic_copy.py +147 -0
- config/cosmetic_targets.py +52 -0
- config/hudson_products.py +209 -0
- lib/__pycache__/concern_inference.cpython-312.pyc +0 -0
- lib/__pycache__/cosmetic_inference.cpython-312.pyc +0 -0
- lib/__pycache__/cosmetic_reporting.cpython-312.pyc +0 -0
- lib/__pycache__/derm_local.cpython-312.pyc +0 -0
- lib/__pycache__/full_analysis.cpython-312.pyc +0 -0
- lib/__pycache__/recommendations.cpython-312.pyc +0 -0
- lib/__pycache__/reporting.cpython-312.pyc +0 -0
- lib/__pycache__/session_aggregation.cpython-312.pyc +0 -0
- lib/concern_inference.py +135 -0
- lib/cosmetic_inference.py +143 -0
- lib/cosmetic_reporting.py +185 -0
- lib/derm_local.py +47 -0
- lib/full_analysis.py +113 -0
- lib/recommendations.py +93 -0
- lib/reporting.py +215 -0
- lib/session_aggregation.py +61 -0
- models/cosmetic/fst_logreg.joblib +3 -0
- models/cosmetic/fst_mlp_model.joblib +3 -0
- models/cosmetic/fst_mlp_scaler.joblib +3 -0
- models/cosmetic/fst_scaler.joblib +3 -0
- models/cosmetic/monk_logreg.joblib +3 -0
- models/cosmetic/monk_scaler.joblib +3 -0
- models/logreg.joblib +3 -0
- models/scaler.joblib +3 -0
- models/scin_concerns_logreg.joblib +3 -0
- models/scin_concerns_scaler.joblib +3 -0
- requirements.txt +13 -0
Dockerfile
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Container image for the know-your-skin FastAPI backend.
FROM python:3.10-slim

# Install basic system dependencies (you may extend this if builds fail)
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Install Python dependencies
# requirements.txt is copied alone first so this layer is cached until it changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY . .

# Hugging Face Spaces route traffic to port 7860 by default
EXPOSE 7860

# Start FastAPI via Uvicorn
# "app:app" refers to the `app` object re-exported by app.py from api.server.
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
|
| 22 |
+
|
| 23 |
+
|
api/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
API package exposing HTTP endpoints for the skin analysis pipeline.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
|
api/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (216 Bytes). View file
|
|
|
api/__pycache__/server.cpython-312.pyc
ADDED
|
Binary file (5.65 kB). View file
|
|
|
api/server.py
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from tempfile import NamedTemporaryFile
from typing import Any, Dict, List
import os
import shutil

from fastapi import FastAPI, File, UploadFile, HTTPException
from fastapi.middleware.cors import CORSMiddleware

from lib.full_analysis import analyze_full_image, analyze_full_session


# Application object; also re-exported by the top-level app.py so the
# Dockerfile can start it with `uvicorn app:app`.
app = FastAPI(
    title="Hudson Skin Analysis API",
    description=(
        "Local API for running Derm Foundation + SCIN-based cosmetic attributes "
        "and concern analysis, returning a combined narrative report with Hudson "
        "product recommendations."
    ),
    version="0.2.0",
)

# For prototype purposes, allow all origins. You can tighten this later.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
@app.get("/health")
def health() -> Dict[str, str]:
    """Liveness probe: confirm that the API process is up and responding."""
    return {"status": "ok"}
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
@app.post("/analyze")
async def analyze(image: UploadFile = File(...)) -> Dict[str, Any]:
    """
    Analyze a single skin image and return a combined JSON structure:

        {
          "cosmetic": { ... },   # tone / type education
          "concerns": { ... }    # concern sections + routine + disclaimer
        }

    Expected request (multipart/form-data):
      - field name: 'image'
      - value: image file (JPEG/PNG)
    """
    if not image.filename:
        raise HTTPException(status_code=400, detail="Uploaded file must have a filename.")

    # Basic content-type check (relaxed for prototype)
    if image.content_type and not image.content_type.startswith("image/"):
        raise HTTPException(status_code=400, detail="Uploaded file must be an image.")

    # Preserve the upload's extension so downstream image loaders see a
    # sensible suffix; default to .jpg when none is present.
    extension = os.path.splitext(image.filename)[1] or ".jpg"

    with NamedTemporaryFile(delete=False, suffix=extension) as handle:
        temp_path = handle.name
        shutil.copyfileobj(image.file, handle)

    try:
        # cosmetic + concerns in one call so the mobile app can show both layers
        report = analyze_full_image(temp_path)
    except Exception as exc:  # pragma: no cover - thin wrapper
        raise HTTPException(status_code=500, detail=f"Analysis failed: {exc}") from exc
    finally:
        # Best-effort removal of the temp file; delete=False means we own cleanup.
        try:
            os.remove(temp_path)
        except OSError:
            pass

    return report
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
@app.post("/analyze_session")
async def analyze_session(images: List[UploadFile] = File(...)) -> Dict[str, Any]:
    """
    Analyze multiple images from the same person and return a single
    session-level combined JSON structure (cosmetic + concerns).

    Expected request (multipart/form-data):
      - field name: 'images'
      - value: one or more image files (JPEG/PNG)

    This uses the same report shape as /analyze, but aggregates
    probabilities across all provided images before building the report.
    """
    if not images:
        raise HTTPException(status_code=400, detail="At least one image must be provided.")

    saved_paths: List[str] = []

    try:
        for upload in images:
            if not upload.filename:
                raise HTTPException(status_code=400, detail="Each uploaded file must have a filename.")

            if upload.content_type and not upload.content_type.startswith("image/"):
                raise HTTPException(status_code=400, detail="All uploaded files must be images.")

            # Keep the original extension (default .jpg) so downstream loaders
            # can sniff the format from the filename if they need to.
            extension = os.path.splitext(upload.filename)[1] or ".jpg"
            with NamedTemporaryFile(delete=False, suffix=extension) as handle:
                shutil.copyfileobj(upload.file, handle)
                saved_paths.append(handle.name)

        report = analyze_full_session(saved_paths)
    except HTTPException:
        # Re-raise FastAPI HTTP errors unchanged
        raise
    except Exception as exc:  # pragma: no cover - thin wrapper
        raise HTTPException(status_code=500, detail=f"Session analysis failed: {exc}") from exc
    finally:
        # Remove every temp file we managed to create, even on failure.
        for saved in saved_paths:
            try:
                os.remove(saved)
            except OSError:
                pass

    return report
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
|
app.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Re-export the FastAPI application so `uvicorn app:app` (see Dockerfile) works.
from api.server import app
|
| 2 |
+
|
| 3 |
+
|
config/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Configuration package for concern tags, mappings, and product recommendations.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
|
config/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (230 Bytes). View file
|
|
|
config/__pycache__/concerns.cpython-312.pyc
ADDED
|
Binary file (6.9 kB). View file
|
|
|
config/__pycache__/cosmetic_copy.cpython-312.pyc
ADDED
|
Binary file (5.64 kB). View file
|
|
|
config/__pycache__/cosmetic_targets.cpython-312.pyc
ADDED
|
Binary file (1.11 kB). View file
|
|
|
config/__pycache__/hudson_products.cpython-312.pyc
ADDED
|
Binary file (5.52 kB). View file
|
|
|
config/concerns.py
ADDED
|
@@ -0,0 +1,224 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Definitions for high-level skin concerns, mappings from SCIN condition labels,
|
| 3 |
+
and user-facing messaging + product hooks for the prototype.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from typing import Dict, List
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
# Six customer-facing concern tags
|
| 10 |
+
CONCERN_TAGS: List[str] = [
|
| 11 |
+
"Dry_Sensitive",
|
| 12 |
+
"Breakouts_Bumps",
|
| 13 |
+
"Itchy_Hives",
|
| 14 |
+
"Red_Scaly_Patches",
|
| 15 |
+
"Pigment_Tone_Issues",
|
| 16 |
+
"Possible_Infection",
|
| 17 |
+
]
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Minimum dermatologist confidence (1–5) for using a SCIN label
|
| 21 |
+
MIN_DERM_CONFIDENCE: int = 3
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# Mapping from SCIN condition label (exact string) to one or more concern tags.
# This is not exhaustive; we only map the most common / relevant conditions for
# this prototype. Unmapped conditions are ignored when building concern labels.
# All target tags must be members of CONCERN_TAGS above.
CONCERN_MAP: Dict[str, List[str]] = {
    # Dry / sensitive / eczematous patterns
    "Eczema": ["Dry_Sensitive"],
    "Acute dermatitis, NOS": ["Dry_Sensitive"],
    "Acute and chronic dermatitis": ["Dry_Sensitive"],
    "Seborrheic Dermatitis": ["Dry_Sensitive", "Red_Scaly_Patches"],
    "Irritant Contact Dermatitis": ["Dry_Sensitive"],
    "Allergic Contact Dermatitis": ["Dry_Sensitive", "Itchy_Hives"],
    "CD - Contact dermatitis": ["Dry_Sensitive"],
    "Stasis Dermatitis": ["Dry_Sensitive", "Red_Scaly_Patches"],
    "Lichen Simplex Chronicus": ["Dry_Sensitive"],

    # Breakouts / bumps
    "Acne": ["Breakouts_Bumps"],
    "Folliculitis": ["Breakouts_Bumps"],
    "Molluscum Contagiosum": ["Breakouts_Bumps"],
    "Prurigo nodularis": ["Breakouts_Bumps", "Dry_Sensitive"],

    # Itchy / hives / bites / hypersensitivity
    "Insect Bite": ["Itchy_Hives"],
    "Urticaria": ["Itchy_Hives"],
    "Hypersensitivity": ["Itchy_Hives"],
    "Viral Exanthem": ["Itchy_Hives", "Possible_Infection"],

    # Red, scaly patches / inflammatory plaques
    "Psoriasis": ["Red_Scaly_Patches"],
    "Pityriasis rosea": ["Red_Scaly_Patches"],
    "Lichen planus/lichenoid eruption": ["Red_Scaly_Patches"],
    "Pityriasis lichenoides": ["Red_Scaly_Patches"],
    "Cutaneous lupus": ["Red_Scaly_Patches"],

    # Pigmentation / tone issues
    "Post-Inflammatory hyperpigmentation": ["Pigment_Tone_Issues"],
    "Pigmented purpuric eruption": ["Pigment_Tone_Issues"],
    "Keratosis pilaris": ["Pigment_Tone_Issues", "Breakouts_Bumps"],
    "O/E - ecchymoses present": ["Pigment_Tone_Issues"],

    # Possible infections (use carefully, non-diagnostic wording)
    "Tinea": ["Possible_Infection"],
    "Tinea Versicolor": ["Possible_Infection", "Pigment_Tone_Issues"],
    "Impetigo": ["Possible_Infection"],
    "Herpes Zoster": ["Possible_Infection"],
    "Herpes Simplex": ["Possible_Infection"],
    "Cellulitis": ["Possible_Infection"],
    "Abscess": ["Possible_Infection"],
    "Scabies": ["Possible_Infection", "Itchy_Hives"],
}
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
# User-facing messaging and product hooks per concern tag.
# Product IDs are placeholders for now – replace with real SKUs from your catalog.
# Every entry carries the same keys: title, description, what_it_means,
# care_focus, disclaimer, recommended_products.
CONCERN_CONFIG: Dict[str, Dict[str, object]] = {
    "Dry_Sensitive": {
        "title": "Dry or Sensitive Skin Tendencies",
        "description": (
            "Your photo shows patterns often associated with dry or easily irritated skin. "
            "This may include visible flaking, redness, or rough patches."
        ),
        "what_it_means": (
            "These features suggest that your skin barrier may be a bit fragile or dehydrated, "
            "so it can lose moisture more quickly and react to harsh products or weather changes."
        ),
        "care_focus": (
            "Focus on gentle cleansing, replenishing moisture, and protecting the skin barrier with "
            "soothing, fragrance-free products."
        ),
        "disclaimer": (
            "This is an AI-based cosmetic skin analysis, not a medical diagnosis. "
            "If you have pain, rapid changes, or health concerns, please see a dermatologist."
        ),
        "recommended_products": [
            "hydrating_cleanser",
            "barrier_repair_moisturizer",
            "gentle_spf",
        ],
    },
    "Breakouts_Bumps": {
        "title": "Breakouts and Bumps",
        "description": (
            "We detect patterns often seen with clogged pores, breakouts, or follicle-related bumps."
        ),
        "what_it_means": (
            "This can happen when excess oil, dead skin, or bacteria build up in pores or around hair follicles, "
            "leading to blemishes or tiny bumps on the skin."
        ),
        "care_focus": (
            "Focus on balancing oil, gently exfoliating to keep pores clear, and avoiding products that clog pores."
        ),
        "disclaimer": (
            "This is not a medical diagnosis. For severe, painful, or persistent acne, "
            "consult a dermatologist."
        ),
        "recommended_products": [
            "clarifying_cleanser",
            "lightweight_noncomedogenic_moisturizer",
            "targeted_bha_serum",
        ],
    },
    "Itchy_Hives": {
        "title": "Itch, Hives, or Bite-like Areas",
        "description": (
            "Your image shows patterns that can be seen with itchy, hive-like, or bite-like areas."
        ),
        "what_it_means": (
            "These patterns may reflect irritation or a reactive response in the skin, which can be triggered by "
            "many factors such as friction, bites, or contact with allergens."
        ),
        "care_focus": (
            "Focus on calming and protecting the skin, avoiding harsh products, and keeping the area moisturized "
            "without over-scrubbing."
        ),
        "disclaimer": (
            "Only a clinician can diagnose allergic reactions or bites. "
            "If symptoms are severe, spreading, or accompanied by other signs, seek medical care."
        ),
        "recommended_products": [
            "soothing_lotion",
            "fragrance_free_moisturizer",
        ],
    },
    "Red_Scaly_Patches": {
        "title": "Red or Scaly Patches",
        "description": (
            "We see features that may correspond to red, scaly, or plaque-like areas on the skin."
        ),
        "what_it_means": (
            "Red and scaly areas can appear when the skin is inflamed and turning over more quickly, "
            "which is seen in several common skin conditions."
        ),
        "care_focus": (
            "Focus on very gentle cleansing, rich but non-irritating moisturizers, and daily sun protection "
            "to support the skin while you discuss any persistent patches with a professional."
        ),
        "disclaimer": (
            "Conditions like psoriasis or dermatitis require professional diagnosis. "
            "This analysis is cosmetic only and may miss important findings."
        ),
        "recommended_products": [
            "rich_emollient_cream",
            "gentle_scalp_or_body_wash",
        ],
    },
    "Pigment_Tone_Issues": {
        "title": "Pigment and Tone Irregularities",
        "description": (
            "Your photo shows patterns that can be associated with uneven skin tone or spots, "
            "such as post-blemish marks or areas of darker or lighter pigmentation."
        ),
        "what_it_means": (
            "These findings suggest that your skin may be developing or holding onto areas of extra or reduced pigment, "
            "often after breakouts, sun exposure, or previous irritation."
        ),
        "care_focus": (
            "Focus on consistent sun protection and targeted brightening steps, while keeping the skin barrier healthy "
            "and avoiding aggressive at-home treatments."
        ),
        "disclaimer": (
            "This is not a diagnosis of any pigment disorder. "
            "If you notice rapidly changing spots or moles, see a dermatologist."
        ),
        "recommended_products": [
            "brightening_serum",
            "daily_spf",
            "gentle_exfoliant",
        ],
    },
    "Possible_Infection": {
        "title": "Patterns Sometimes Seen in Infections",
        "description": (
            "Some features in the image can be seen in skin infections or overgrowth of microbes. "
            "Only a medical professional can confirm this."
        ),
        "what_it_means": (
            "This may reflect areas where the skin barrier is disrupted and microbes can more easily grow, "
            "but many look-alike conditions exist, so only a clinician can tell what is really going on."
        ),
        "care_focus": (
            "Focus on gentle cleansing, supporting the skin barrier, and seeking in-person medical advice for "
            "painful, spreading, or worrying areas."
        ),
        "disclaimer": (
            "This tool cannot rule out serious conditions. If you have pain, fever, "
            "rapid spreading, or feel unwell, please seek urgent medical advice."
        ),
        "recommended_products": [
            "gentle_cleanser",
            "barrier_support_moisturizer",
        ],
    },
}
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def get_concern_index_map() -> Dict[str, int]:
    """Return a mapping from concern tag name to column index."""
    return dict(zip(CONCERN_TAGS, range(len(CONCERN_TAGS))))
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
|
config/cosmetic_copy.py
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Copy and educational content for cosmetic attributes.
|
| 3 |
+
|
| 4 |
+
This is used by the cosmetic reporting layer to turn predictions into
|
| 5 |
+
user-friendly text. All wording here is cosmetic/non-medical.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from typing import Dict, Any
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# Fitzpatrick skin type copy
|
| 12 |
+
FST_COPY: Dict[str, Dict[str, Any]] = {
|
| 13 |
+
"FST1": {
|
| 14 |
+
"title": "Fitzpatrick Type I (very fair, always burns)",
|
| 15 |
+
"overview": (
|
| 16 |
+
"This type of skin is very fair and tends to burn easily with minimal sun exposure "
|
| 17 |
+
"and rarely tans. It often needs extra gentle care and diligent daily sun protection."
|
| 18 |
+
),
|
| 19 |
+
"care_focus": (
|
| 20 |
+
"Prioritize broad-spectrum sunscreen every day, seek shade where possible, and build up active "
|
| 21 |
+
"ingredients gradually to avoid irritation. Hydrating, barrier-supporting products are usually helpful."
|
| 22 |
+
),
|
| 23 |
+
},
|
| 24 |
+
"FST2": {
|
| 25 |
+
"title": "Fitzpatrick Type II (fair, usually burns, may tan lightly)",
|
| 26 |
+
"overview": (
|
| 27 |
+
"This type of skin is fair and can burn with sun exposure, sometimes followed by a light tan. "
|
| 28 |
+
"It has moderate sensitivity to the sun."
|
| 29 |
+
),
|
| 30 |
+
"care_focus": (
|
| 31 |
+
"Daily SPF and gentle hydrators are key. Introduce exfoliants and brightening ingredients slowly and "
|
| 32 |
+
"watch for dryness or redness."
|
| 33 |
+
),
|
| 34 |
+
},
|
| 35 |
+
"FST3": {
|
| 36 |
+
"title": "Fitzpatrick Type III (medium, sometimes burns, gradually tans)",
|
| 37 |
+
"overview": (
|
| 38 |
+
"This type of skin has a light-to-medium tone, may burn with strong sun at first, and then gradually tans. "
|
| 39 |
+
"It can be somewhat prone to uneven tone or post-blemish marks."
|
| 40 |
+
),
|
| 41 |
+
"care_focus": (
|
| 42 |
+
"Consistent SPF helps prevent dark spots. Balanced routines with gentle exfoliation and brightening products "
|
| 43 |
+
"can support an even, healthy-looking tone."
|
| 44 |
+
),
|
| 45 |
+
},
|
| 46 |
+
"FST4": {
|
| 47 |
+
"title": "Fitzpatrick Type IV (olive to brown, rarely burns, tans easily)",
|
| 48 |
+
"overview": (
|
| 49 |
+
"This type of skin is naturally deeper in color and tends to tan easily while rarely burning. "
|
| 50 |
+
"It may be more prone to pigmentation changes after irritation or breakouts."
|
| 51 |
+
),
|
| 52 |
+
"care_focus": (
|
| 53 |
+
"Daily sunscreen is still important to protect against long-term damage and uneven tone. "
|
| 54 |
+
"Focus on gentle, non-irritating actives and hydrating care to support a smooth, radiant complexion."
|
| 55 |
+
),
|
| 56 |
+
},
|
| 57 |
+
"FST5": {
|
| 58 |
+
"id": "FST5",
|
| 59 |
+
"title": "Fitzpatrick Type V (brown, rarely burns, tans deeply)",
|
| 60 |
+
"overview": (
|
| 61 |
+
"This type of skin has a deeper brown tone, usually tans deeply and rarely burns. "
|
| 62 |
+
"It can be more prone to dark marks lingering after inflammation."
|
| 63 |
+
),
|
| 64 |
+
"care_focus": (
|
| 65 |
+
"Gentle daily cleansing, consistent SPF, and pigment-safe brightening ingredients can help manage "
|
| 66 |
+
"uneven tone while maintaining the skin's natural richness."
|
| 67 |
+
),
|
| 68 |
+
},
|
| 69 |
+
"FST6": {
|
| 70 |
+
"title": "Fitzpatrick Type VI (very dark, deeply pigmented)",
|
| 71 |
+
"overview": (
|
| 72 |
+
"This type of skin is very richly pigmented and typically does not burn, but can still experience sun damage "
|
| 73 |
+
"and is often prone to visible dark marks after irritation."
|
| 74 |
+
),
|
| 75 |
+
"care_focus": (
|
| 76 |
+
"Broad-spectrum sunscreen is still valuable to protect skin health and tone. Focus on barrier support, "
|
| 77 |
+
"hydration, and gentle brightening where needed, avoiding harsh or stripping products."
|
| 78 |
+
),
|
| 79 |
+
},
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
# Monk tone copy grouped into simple ranges for education.
# The three ranges partition Monk tones 1..10 with no gaps or overlap.
MONK_TONE_RANGES = [
    {"name": "lighter tones", "range": range(1, 4)},
    {"name": "medium tones", "range": range(4, 8)},
    {"name": "deeper tones", "range": range(8, 11)},
]

# Educational copy keyed by the range "name" values above.
MONK_COPY: Dict[str, str] = {
    "lighter tones": (
        "On lighter skin tones, redness and sunburn often show up quickly, while dark marks may appear tan or pink. "
        "Consistent sunscreen and gentle barrier care help prevent irritation and long-term discoloration."
    ),
    "medium tones": (
        "On medium skin tones, both redness and dark marks can appear, especially after breakouts or sun exposure. "
        "Daily SPF and balanced routines with hydrating and brightening ingredients support an even complexion."
    ),
    "deeper tones": (
        "On deeper skin tones, dark spots and uneven patches may linger longer, even if redness is less visible. "
        "Sun protection plus pigment-safe brightening products can help maintain a smooth, radiant tone."
    ),
}
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
# Texture copy per tag.
# Keys are expected to match the texture tags used by the cosmetic layer
# (see config.cosmetic_targets.TEXTURE_TAGS).
TEXTURE_COPY: Dict[str, Dict[str, str]] = {
    "Texture_Smooth": {
        "title": "Mostly smooth texture",
        "body": (
            "Your skin texture looks mostly smooth and even in this photo. That’s a great foundation. "
            "A gentle cleanser, consistent moisturizer, and daily sunscreen can help keep it that way."
        ),
    },
    "Texture_Bumpy": {
        "title": "Uneven or bumpy texture",
        "body": (
            "We see some raised or bumpy areas that can happen with clogged pores or small textural changes. "
            "Gentle exfoliation, non-comedogenic hydrators, and balanced oil control can help smooth the look over time."
        ),
    },
    "Texture_Rough_Flakey": {
        "title": "Dry or flaky texture",
        "body": (
            "Your skin shows some rough or flaky areas, which often indicate dryness or a compromised barrier. "
            "Hydrating cleansers, richer moisturizers, and barrier-supporting ingredients can help restore softness."
        ),
    },
    "Texture_Fluid_Filled": {
        "title": "Fluid-filled or very raised areas",
        "body": (
            "We notice some fluid-filled or very raised areas. While some bumps can be part of normal skin, "
            "sudden or painful changes are best reviewed with a dermatologist. In the meantime, focus on gentle care "
            "and avoid picking or harsh scrubs."
        ),
    },
}
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
# Standard non-medical disclaimer appended to every cosmetic report.
COSMETIC_DISCLAIMER = (
    "The information above describes cosmetic characteristics such as skin tone and texture based on this single photo. "
    "It is not a medical diagnosis and may not capture the full picture of your skin. For any concerns, persistent "
    "changes, or symptoms like pain or bleeding, please consult a dermatologist or healthcare professional."
)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
|
config/cosmetic_targets.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Canonical label mappings for cosmetic skin attributes derived from SCIN:
|
| 3 |
+
|
| 4 |
+
- Fitzpatrick Skin Type (FST I–VI)
|
| 5 |
+
- Monk Skin Tone (1–10)
|
| 6 |
+
- Texture tags (smooth / bumpy / rough/flaky / optional fluid-filled)
|
| 7 |
+
|
| 8 |
+
These mappings are used by the cosmetic attribute training and inference code.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from typing import Dict, List
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Canonical Fitzpatrick labels we expose in our cosmetic layer.
|
| 15 |
+
# SCIN stores values such as "FST1" .. "FST6" or "NONE_SELECTED".
|
| 16 |
+
FST_LABELS: List[str] = ["FST1", "FST2", "FST3", "FST4", "FST5", "FST6"]
|
| 17 |
+
|
| 18 |
+
# Values in the SCIN CSV for Fitzpatrick skin type from users / dermatologists
|
| 19 |
+
# are expected to match these strings. Any value not in this set (e.g.
|
| 20 |
+
# "NONE_SELECTED" or missing) will be treated as unknown and skipped for
|
| 21 |
+
# training.
|
| 22 |
+
VALID_FST_VALUES = set(FST_LABELS)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
# Monk skin tone labels (1–10). SCIN stores these as integers 1..10.
|
| 26 |
+
MONK_TONE_VALUES: List[int] = list(range(1, 11))
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
# Cosmetic texture tags we expose in our report.
|
| 30 |
+
TEXTURE_TAGS: List[str] = [
|
| 31 |
+
"Texture_Smooth",
|
| 32 |
+
"Texture_Bumpy",
|
| 33 |
+
"Texture_Rough_Flakey",
|
| 34 |
+
"Texture_Fluid_Filled",
|
| 35 |
+
]
|
| 36 |
+
|
| 37 |
+
# Mapping from SCIN textures_* column suffix to our cosmetic texture tags.
|
| 38 |
+
# SCIN schema describes textures_* booleans with suffixes such as:
|
| 39 |
+
# - TEXTURE_UNSPECIFIED
|
| 40 |
+
# - RAISED_OR_BUMPY
|
| 41 |
+
# - FLAT
|
| 42 |
+
# - ROUGH_OR_FLAKY
|
| 43 |
+
# - FLUID_FILLED
|
| 44 |
+
TEXTURE_FIELD_TO_TAG: Dict[str, str] = {
|
| 45 |
+
"RAISED_OR_BUMPY": "Texture_Bumpy",
|
| 46 |
+
"ROUGH_OR_FLAKY": "Texture_Rough_Flakey",
|
| 47 |
+
"FLAT": "Texture_Smooth",
|
| 48 |
+
"FLUID_FILLED": "Texture_Fluid_Filled",
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
|
config/hudson_products.py
ADDED
|
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Product configuration for Hudson skincare products, normalized for recommendations.
|
| 3 |
+
|
| 4 |
+
We derive a compact schema from `hudson-product-portfolio/hudson_skincare_products.json`
|
| 5 |
+
and add:
|
| 6 |
+
- a stable `id` for each product
|
| 7 |
+
- a primary routine `step` (cleanser / treatment / moisturizer / sunscreen / other)
|
| 8 |
+
- which concern tags each product is suitable for
|
| 9 |
+
|
| 10 |
+
Concern tags must match those defined in `config.concerns.CONCERN_TAGS`.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
from typing import Dict, List, TypedDict
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class ProductConfig(TypedDict):
    """Normalized metadata for a single Hudson product used by the recommender."""

    # Stable product identifier; matches this entry's key in PRODUCT_CONFIG.
    id: str
    # Human-readable product name.
    name: str
    # Primary routine step: cleanser / treatment / moisturizer / sunscreen / other.
    step: str
    # Concern tags this product is suitable for; values must match
    # config.concerns.CONCERN_TAGS.
    supported_concerns: List[str]
    # Source portfolio grouping from the Hudson product JSON, if known.
    portfolio: str | None
    # Recommendation copy template; contains a "{name}" placeholder —
    # presumably formatted with the product name downstream (verify in the
    # recommendation layer).
    why_template: str | None
    # Optional image filename (e.g. "daily_moisturizer.png") relative to a
    # known product-images directory. The API will surface this so the client
    # can display product images when available.
    image_name: str | None
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
# Normalized configuration for the current Hudson product list.
# Keys are stable product IDs; values include step and concern mapping.
# Fix: "a everyday moisturizer" -> "an everyday moisturizer" in user-facing copy.
PRODUCT_CONFIG: Dict[str, ProductConfig] = {
    # Hydration / Dry & Sensitive focused
    "b5_serum": {
        "id": "b5_serum",
        "name": "B5 Serum",
        "step": "treatment",
        "supported_concerns": ["Dry_Sensitive", "Pigment_Tone_Issues"],
        "portfolio": "Hydration/Moisturising",
        "why_template": (
            "Because your skin shows signs of dryness or tone irregularities, "
            "we suggest {name}, a lightweight hydrating serum that helps plump and support the skin barrier."
        ),
        "image_name": None,
    },
    "body_clarifying_cleanser": {
        "id": "body_clarifying_cleanser",
        "name": "Body Clarifying Cleanser",
        "step": "cleanser",
        "supported_concerns": ["Dry_Sensitive", "Itchy_Hives", "Possible_Infection", "Red_Scaly_Patches"],
        "portfolio": "Hydration/Moisturising",
        "why_template": (
            "We recommend {name}, a gentle body cleanser designed to cleanse and moisturize dry or reactive skin "
            "without harsh surfactants."
        ),
        "image_name": "body_clarifying_cleanser.png",
    },
    "daily_moisturizer": {
        "id": "daily_moisturizer",
        "name": "Daily Moisturizer",
        "step": "moisturizer",
        "supported_concerns": ["Dry_Sensitive", "Red_Scaly_Patches", "Itchy_Hives"],
        "portfolio": "Hydration/Moisturising",
        "why_template": (
            "{name} is an everyday moisturizer formulated for sensitive, dry, or redness-prone skin, "
            "to help keep the barrier comfortable and hydrated."
        ),
        "image_name": "daily_moisturizer.png",
    },
    "facial_moisture_balancing_cleanser": {
        "id": "facial_moisture_balancing_cleanser",
        "name": "Facial Moisture Balancing Cleanser",
        "step": "cleanser",
        "supported_concerns": ["Dry_Sensitive", "Red_Scaly_Patches", "Itchy_Hives"],
        "portfolio": "Hydration/Moisturising",
        "why_template": (
            "{name} is a gentle facial cleanser that helps remove impurities while respecting a dry or sensitive skin barrier."
        ),
        "image_name": None,
    },
    "moisture_balancing_cleanser": {
        "id": "moisture_balancing_cleanser",
        "name": "Moisture Balancing Cleanser",
        "step": "cleanser",
        "supported_concerns": ["Dry_Sensitive", "Itchy_Hives", "Red_Scaly_Patches", "Breakouts_Bumps"],
        "portfolio": "Hydration+Anti Acne",
        "why_template": (
            "{name} gently cleanses while supporting dry, sensitive, or acne-prone areas with a hypochlorous-based formula."
        ),
        # Map the generic "hudson-cleanser.png" image to this cleanser.
        "image_name": "hudson-cleanser.png",
    },

    # Anti-acne portfolio / breakouts
    "blemish_age_defense_serum": {
        "id": "blemish_age_defense_serum",
        "name": "Blemish + Age Defense Serum",
        "step": "treatment",
        "supported_concerns": ["Breakouts_Bumps", "Pigment_Tone_Issues"],
        "portfolio": "Anti Acne Portfolio",
        "why_template": (
            "Since we see features linked to breakouts or post-blemish marks, {name} can help clarify pores, "
            "balance oil, and improve the look of blemishes over time."
        ),
        "image_name": "blemish_plus_age_defense_serum.png",
    },
    "salicylic_acid_cleanser": {
        "id": "salicylic_acid_cleanser",
        "name": "Salicylic Acid Cleanser",
        "step": "cleanser",
        "supported_concerns": ["Breakouts_Bumps", "Possible_Infection"],
        "portfolio": "Anti Acne Portfolio",
        "why_template": (
            "{name} combines salicylic acid with a hypochlorous base to help reduce excess oil, clear pores, "
            "and support skin that is prone to acne breakouts."
        ),
        "image_name": "salicylic_acid_cleanser.png",
    },
    "silymarin_c15_serum": {
        "id": "silymarin_c15_serum",
        "name": "Silymarin C15 Serum",
        "step": "treatment",
        "supported_concerns": ["Breakouts_Bumps", "Pigment_Tone_Issues"],
        "portfolio": "Anti Acne Portfolio",
        "why_template": (
            "{name} pairs vitamin C with targeted actives to help reduce blemishes, refine texture, "
            "and support a more even-looking tone."
        ),
        "image_name": "silymarin_c15_serum.png",
    },

    # Pigment / anti-discoloration / antioxidant
    "brightening_cream": {
        "id": "brightening_cream",
        "name": "Brightening Cream",
        "step": "treatment",
        "supported_concerns": ["Pigment_Tone_Issues"],
        "portfolio": "Anti Ageing/ Anti Discoloration Portfolio",
        "why_template": (
            "Because your skin shows tone irregularities or dark spots, {name} is a night treatment designed to "
            "gradually fade discoloration and brighten overall radiance."
        ),
        "image_name": None,
    },
    "discoloration_defense_serum": {
        "id": "discoloration_defense_serum",
        "name": "Discoloration Defense Serum",
        "step": "treatment",
        "supported_concerns": ["Pigment_Tone_Issues"],
        "portfolio": "Anti Ageing/ Anti Discoloration Portfolio",
        "why_template": (
            "{name} targets stubborn discoloration and uneven tone, making it a good fit when we detect pigment-related concerns."
        ),
        # NOTE(review): "discoloartion" is misspelled, but this presumably
        # matches the actual asset filename on disk — confirm before fixing.
        "image_name": "discoloartion-defense_serum.png",
    },
    "c15_antioxidant_serum": {
        "id": "c15_antioxidant_serum",
        "name": "C15 Antioxidant Serum",
        "step": "treatment",
        "supported_concerns": ["Pigment_Tone_Issues", "Breakouts_Bumps"],
        "portfolio": "Anti Ageing/ Anti Discoloration Portfolio",
        "why_template": (
            "{name} delivers antioxidant vitamin C to help soften the look of fine lines and uneven tone, "
            "while supporting blemish-prone skin."
        ),
        "image_name": "c15_anti_oxidant_serum.png",
    },

    # Sunscreens (recommended across many concerns, especially pigment)
    # NOTE(review): the two sunscreen entries below look like near-duplicates
    # ("Facial Gel Sunscreen" vs "Facial Sunscreen Gel") — confirm both exist
    # in the product portfolio before consolidating.
    "facial_gel_sunscreen": {
        "id": "facial_gel_sunscreen",
        "name": "Facial Gel Sunscreen",
        "step": "sunscreen",
        "supported_concerns": ["Pigment_Tone_Issues", "Dry_Sensitive", "Red_Scaly_Patches", "Breakouts_Bumps", "Itchy_Hives", "Possible_Infection"],
        "portfolio": "Sunscreen",
        "why_template": (
            "Daily SPF is essential when any skin concern is present. {name} offers broad-spectrum protection in a "
            "lightweight, no-white-cast gel formula that fits easily into most routines."
        ),
        "image_name": None,
    },
    "facial_sunscreen_gel": {
        "id": "facial_sunscreen_gel",
        "name": "Facial Sunscreen Gel",
        "step": "sunscreen",
        "supported_concerns": ["Pigment_Tone_Issues", "Dry_Sensitive", "Red_Scaly_Patches", "Breakouts_Bumps", "Itchy_Hives", "Possible_Infection"],
        "portfolio": "Sunscreen",
        "why_template": (
            "Because consistent sun protection helps with nearly all skin goals, {name} is recommended as a "
            "broad-spectrum gel sunscreen that layers well over your routine."
        ),
        "image_name": None,
    },
}
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
# Ordered routine steps the recommendation layer assembles products into.
ROUTINE_STEPS: List[str] = ["cleanser", "treatment", "moisturizer", "sunscreen"]


def load_product_config() -> Dict[str, ProductConfig]:
    """
    Return the normalized product configuration.

    For now we keep this static and do not dynamically parse the JSON file,
    since the set of products is small and stable for the prototype.

    Note: this returns the module-level PRODUCT_CONFIG dict itself (not a
    copy), so callers must not mutate it.
    """
    return PRODUCT_CONFIG
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
|
lib/__pycache__/concern_inference.cpython-312.pyc
ADDED
|
Binary file (5.59 kB). View file
|
|
|
lib/__pycache__/cosmetic_inference.cpython-312.pyc
ADDED
|
Binary file (6.09 kB). View file
|
|
|
lib/__pycache__/cosmetic_reporting.cpython-312.pyc
ADDED
|
Binary file (6.63 kB). View file
|
|
|
lib/__pycache__/derm_local.cpython-312.pyc
ADDED
|
Binary file (2.9 kB). View file
|
|
|
lib/__pycache__/full_analysis.cpython-312.pyc
ADDED
|
Binary file (3.97 kB). View file
|
|
|
lib/__pycache__/recommendations.cpython-312.pyc
ADDED
|
Binary file (4.1 kB). View file
|
|
|
lib/__pycache__/reporting.cpython-312.pyc
ADDED
|
Binary file (8.3 kB). View file
|
|
|
lib/__pycache__/session_aggregation.cpython-312.pyc
ADDED
|
Binary file (3 kB). View file
|
|
|
lib/concern_inference.py
ADDED
|
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
from typing import Dict, List, Any
|
| 4 |
+
|
| 5 |
+
import joblib
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
from config.concerns import CONCERN_CONFIG, CONCERN_TAGS
|
| 9 |
+
from config.hudson_products import load_product_config
|
| 10 |
+
from lib.derm_local import embed_image_path
|
| 11 |
+
from lib.recommendations import build_routine
|
| 12 |
+
from lib.reporting import build_report
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
@functools.lru_cache(maxsize=1)
def _load_models(models_dir: str = "models") -> Dict[str, Any]:
    """Load the concern scaler and classifier from *models_dir* (cached)."""
    models_path = Path(models_dir)
    scaler_file = models_path / "scin_concerns_scaler.joblib"
    clf_file = models_path / "scin_concerns_logreg.joblib"

    # Both artifacts must be present; otherwise point the user at training.
    if not (scaler_file.exists() and clf_file.exists()):
        raise FileNotFoundError(
            f"Expected concern models not found in {models_path}. "
            "Run `python scripts/train_scins_concerns.py` first."
        )

    return {
        "scaler": joblib.load(scaler_file),
        "clf": joblib.load(clf_file),
    }
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def _proba_to_binary(proba_list: List[np.ndarray], threshold: float = 0.5) -> np.ndarray:
    """Collapse per-label probability arrays into a 0/1 prediction matrix.

    For binary classifiers the positive class lives in column 1; otherwise we
    fall back to the last column.
    """
    columns = []
    for proba in proba_list:
        positive_col = 1 if proba.shape[1] == 2 else -1
        columns.append((proba[:, positive_col] >= threshold).astype(int))
    return np.column_stack(columns)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def analyze_embedding(
    embedding: np.ndarray,
    models_dir: str = "models",
    threshold: float = 0.5,
) -> Dict[str, Any]:
    """
    Run the trained concern classifier on a single Derm Foundation embedding.

    Returns a dict keyed by concern tag with probability, active flag, and
    user-facing metadata from CONCERN_CONFIG.
    """
    X = embedding.reshape(1, -1) if embedding.ndim == 1 else embedding

    bundle = _load_models(models_dir)
    proba_list = bundle["clf"].predict_proba(bundle["scaler"].transform(X))

    def _positive_prob(p: np.ndarray) -> float:
        # Binary output: positive class in column 1; otherwise last column.
        return float(p[0, 1]) if p.shape[1] == 2 else float(p[0, -1])

    probs = [_positive_prob(p) for p in proba_list]
    active_flags = _proba_to_binary(proba_list, threshold=threshold)[0]

    report: Dict[str, Any] = {}
    for idx, tag in enumerate(CONCERN_TAGS):
        cfg = CONCERN_CONFIG.get(tag, {})
        report[tag] = {
            "prob": probs[idx],
            "active": bool(active_flags[idx]),
            "title": cfg.get("title", tag),
            "description": cfg.get("description", ""),
            "disclaimer": cfg.get("disclaimer", ""),
            "recommended_products": cfg.get("recommended_products", []),
        }
    return report
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def analyze_image(
    image_path: str,
    models_dir: str = "models",
    threshold: float = 0.5,
) -> Dict[str, Any]:
    """
    High-level helper: embed a local image with Derm Foundation and run the
    concern classifier on the resulting embedding.
    """
    return analyze_embedding(
        embed_image_path(image_path),
        models_dir=models_dir,
        threshold=threshold,
    )
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def analyze_image_with_routine(
    image_path: str,
    models_dir: str = "models",
    threshold: float = 0.5,
) -> Dict[str, Any]:
    """
    Extended helper: run concern analysis and build a product routine
    using the Hudson product configuration.
    """
    concern_results = analyze_image(
        image_path, models_dir=models_dir, threshold=threshold
    )
    return {
        "concerns": concern_results,
        "routine": build_routine(concern_results, load_product_config()),
    }
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def analyze_image_report(
    image_path: str,
    models_dir: str = "models",
    threshold: float = 0.5,
) -> Dict[str, Any]:
    """
    Full pipeline: embed image, get concerns, build routine, and assemble
    a user-friendly report structure.

    Delegates the concern + routine steps to analyze_image_with_routine so
    that logic lives in exactly one place (it was previously duplicated here).
    """
    combined = analyze_image_with_routine(
        image_path, models_dir=models_dir, threshold=threshold
    )
    return build_report(combined["concerns"], combined["routine"])
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
|
lib/cosmetic_inference.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import functools
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
from typing import Any, Dict, List
|
| 4 |
+
|
| 5 |
+
import joblib
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
from config.cosmetic_targets import FST_LABELS, MONK_TONE_VALUES, TEXTURE_TAGS
|
| 9 |
+
from lib.derm_local import embed_image_path
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# Default directory holding the trained cosmetic-attribute models.
COSMETIC_MODELS_DIR_DEFAULT = "models/cosmetic"


# Filenames for Fitzpatrick models. We default to the MLP version when present,
# but keep the original logistic regression as a fallback.
FST_MLP_SCALER_NAME = "fst_mlp_scaler.joblib"
FST_MLP_MODEL_NAME = "fst_mlp_model.joblib"
FST_LR_SCALER_NAME = "fst_scaler.joblib"
FST_LR_MODEL_NAME = "fst_logreg.joblib"
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
@functools.lru_cache(maxsize=1)
def _load_cosmetic_models(models_dir: str = COSMETIC_MODELS_DIR_DEFAULT) -> Dict[str, Any]:
    """Load all cosmetic scalers and models from disk (cached)."""
    base = Path(models_dir)
    loaded: Dict[str, Any] = {}

    def _load_pair(scaler_name: str, model_name: str, scaler_key: str, model_key: str) -> bool:
        # Load a scaler/model pair only when both files exist; report success.
        scaler_file = base / scaler_name
        model_file = base / model_name
        if scaler_file.exists() and model_file.exists():
            loaded[scaler_key] = joblib.load(scaler_file)
            loaded[model_key] = joblib.load(model_file)
            return True
        return False

    # Fitzpatrick: prefer the MLP variant, fall back to logistic regression.
    if _load_pair(FST_MLP_SCALER_NAME, FST_MLP_MODEL_NAME, "fst_scaler", "fst_model"):
        loaded["fst_model_type"] = "mlp"
    elif _load_pair(FST_LR_SCALER_NAME, FST_LR_MODEL_NAME, "fst_scaler", "fst_model"):
        loaded["fst_model_type"] = "logreg"

    # Monk tone classifier (optional).
    _load_pair("monk_scaler.joblib", "monk_logreg.joblib", "monk_scaler", "monk_model")

    # Texture classifier (optional, multi-label).
    _load_pair("texture_scaler.joblib", "texture_logreg.joblib", "texture_scaler", "texture_model")

    if not loaded:
        raise FileNotFoundError(
            f"No cosmetic models found in {base}. "
            "Run `python scripts/train_cosmetic_attributes.py` first."
        )

    return loaded
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def _ensure_2d(emb: np.ndarray) -> np.ndarray:
    """Return *emb* with shape (n_samples, n_features); promote 1-D vectors."""
    return emb.reshape(1, -1) if emb.ndim == 1 else emb
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def predict_cosmetic_from_embedding(
    embedding: np.ndarray,
    models_dir: str = COSMETIC_MODELS_DIR_DEFAULT,
    texture_threshold: float = 0.5,
) -> Dict[str, Any]:
    """
    Run Fitzpatrick, Monk tone, and texture classifiers on a Derm embedding.

    Args:
        embedding: A single Derm Foundation embedding, 1-D or (1, n_features).
        models_dir: Directory containing the trained cosmetic models.
        texture_threshold: Probability at or above which a texture tag is
            flagged active. Default 0.5 preserves the previously hard-coded
            behavior; now parameterized for tuning.

    Returns a nested dict suitable for cosmetic reporting. Each section is
    only present when the corresponding model pair was loaded from disk.
    """
    emb = _ensure_2d(embedding.astype(np.float32))
    models = _load_cosmetic_models(models_dir)
    result: Dict[str, Any] = {}

    # Fitzpatrick: single-label argmax over FST1..FST6.
    fst_scaler = models.get("fst_scaler")
    fst_model = models.get("fst_model")
    if fst_scaler is not None and fst_model is not None:
        proba = fst_model.predict_proba(fst_scaler.transform(emb))[0]
        label_idx = int(np.argmax(proba))
        result["fitzpatrick"] = {
            "label": FST_LABELS[label_idx],
            "probs": {FST_LABELS[i]: float(p) for i, p in enumerate(proba)},
        }

    # Monk tone: single-label argmax over tones 1..10.
    monk_scaler = models.get("monk_scaler")
    monk_model = models.get("monk_model")
    if monk_scaler is not None and monk_model is not None:
        proba = monk_model.predict_proba(monk_scaler.transform(emb))[0]
        label_idx = int(np.argmax(proba))
        result["monk_tone"] = {
            "label": MONK_TONE_VALUES[label_idx],
            "probs": {str(MONK_TONE_VALUES[i]): float(p) for i, p in enumerate(proba)},
        }

    # Texture: independent binary classifiers (multi-label).
    texture_scaler = models.get("texture_scaler")
    texture_model = models.get("texture_model")
    if texture_scaler is not None and texture_model is not None:
        proba_list: List[np.ndarray] = texture_model.predict_proba(texture_scaler.transform(emb))
        texture_info: Dict[str, Any] = {}
        for tag, p in zip(TEXTURE_TAGS, proba_list):
            # p shape: (n_samples, n_classes); positive-class probability is
            # column 1 for binary outputs, else the last column.
            prob_pos = float(p[0, 1]) if p.shape[1] >= 2 else float(p[0, -1])
            texture_info[tag] = {
                "prob": prob_pos,
                "active": bool(prob_pos >= texture_threshold),
            }
        result["texture"] = texture_info

    return result
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def analyze_cosmetic_image(
    image_path: str,
    models_dir: str = COSMETIC_MODELS_DIR_DEFAULT,
) -> Dict[str, Any]:
    """
    High-level helper: embed image with Derm Foundation and run cosmetic attribute classifiers.
    """
    return predict_cosmetic_from_embedding(
        embed_image_path(image_path), models_dir=models_dir
    )
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
|
lib/cosmetic_reporting.py
ADDED
|
@@ -0,0 +1,185 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Turn cosmetic predictions (Fitzpatrick, Monk tone, texture) into a
|
| 3 |
+
user-facing educational report.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from typing import Any, Dict, List, Tuple
|
| 7 |
+
|
| 8 |
+
from config.cosmetic_copy import (
|
| 9 |
+
FST_COPY,
|
| 10 |
+
MONK_TONE_RANGES,
|
| 11 |
+
MONK_COPY,
|
| 12 |
+
TEXTURE_COPY,
|
| 13 |
+
COSMETIC_DISCLAIMER,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def _pick_monk_range_label(tone_label: int) -> str:
    """Map a Monk tone value to its group name, defaulting to "medium tones"."""
    names = (entry["name"] for entry in MONK_TONE_RANGES if tone_label in entry["range"])
    return next(names, "medium tones")
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _active_textures(texture_pred: Dict[str, Dict[str, Any]]) -> List[Tuple[str, Dict[str, str]]]:
    """
    Return (tag, copy) pairs for textures flagged active and present in
    TEXTURE_COPY. If nothing crosses the threshold, surface the single
    most probable texture as a soft hint.
    """
    selected = [
        (tag, copy)
        for tag, info in texture_pred.items()
        if info.get("active") and (copy := TEXTURE_COPY.get(tag))
    ]
    if selected or not texture_pred:
        return selected

    # No texture was confidently active — fall back to the best guess.
    best_tag = max(texture_pred.items(), key=lambda kv: kv[1].get("prob", 0.0))[0]
    best_copy = TEXTURE_COPY.get(best_tag)
    return [(best_tag, best_copy)] if best_copy else []
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def _pick_fitzpatrick_labels(probs: Dict[str, float]) -> Dict[str, Any]:
    """
    Decide whether to present a single Fitzpatrick type or a 2-type range
    based on class probabilities.
    """
    if not probs:
        return {"label_mode": "unknown", "labels": [], "primary_label": None}

    ranked = sorted(probs.items(), key=lambda kv: kv[1], reverse=True)
    best_label, best_prob = ranked[0]
    second_label, second_prob = ranked[1] if len(ranked) > 1 else (None, 0.0)

    # Confident and clearly separated from the runner-up -> single type.
    if best_prob >= 0.6 and (best_prob - second_prob) >= 0.15:
        return {
            "label_mode": "single",
            "labels": [best_label],
            "primary_label": best_label,
        }

    # Otherwise present a range over the top two labels.
    range_labels = [best_label] if second_label is None else [best_label, second_label]
    return {
        "label_mode": "range",
        "labels": range_labels,
        "primary_label": best_label,
    }
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def build_cosmetic_report(cosmetic_predictions: Dict[str, Any]) -> Dict[str, Any]:
    """
    Build a structured cosmetic report from predictions:

    {
        "summary": str,
        "tone_section": { ... },
        "texture_section": { ... },
        "disclaimer": str,
    }

    The Fitzpatrick label selection and Monk tone lookup are computed once
    and shared between the summary and the tone section (they were previously
    computed twice).
    """
    fst = cosmetic_predictions.get("fitzpatrick")
    monk = cosmetic_predictions.get("monk_tone")
    texture = cosmetic_predictions.get("texture", {})

    # Shared computations, hoisted so they run exactly once.
    fst_probs: Dict[str, float] = fst.get("probs", {}) if fst else {}
    fst_labels_info: Dict[str, Any] = _pick_fitzpatrick_labels(fst_probs) if fst else {}
    labels: List[str] = fst_labels_info.get("labels", [])
    primary_label = fst_labels_info.get("primary_label")
    label_mode = fst_labels_info.get("label_mode")

    tone_label = int(monk.get("label")) if monk else None
    tone_name = _pick_monk_range_label(tone_label) if monk else None

    # --- Summary ---
    summary_parts: List[str] = []
    if fst and primary_label:
        fst_copy = FST_COPY.get(primary_label, {})
        if fst_copy:
            if label_mode == "range" and len(labels) == 2:
                label_titles = [
                    FST_COPY.get(lbl, {}).get("title", lbl) for lbl in labels
                ]
                range_text = "–".join(label_titles)
                summary_parts.append(
                    f"We see features that place your skin between {range_text}, "
                    "which gives us a sense of how your skin may respond to sun and active ingredients."
                )
            else:
                summary_parts.append(
                    f"We see features that align with {fst_copy.get('title', 'your skin type')}, "
                    "which gives us a sense of how your skin may respond to sun and active ingredients."
                )
    if monk and MONK_COPY.get(tone_name):
        summary_parts.append(
            f"Your overall tone appears to sit in the range of {tone_name}, which influences how marks and color "
            "changes show up on your skin."
        )

    if summary_parts:
        summary = " ".join(summary_parts)
    else:
        # Generic fallback when no model produced usable copy.
        summary = (
            "We used an AI-based model to estimate a few cosmetic characteristics of your skin from this photo, "
            "such as overall tone and visible texture. These insights can guide gentle daily care and product choices."
        )

    # --- Tone section ---
    tone_section: Dict[str, Any] = {}
    if fst and primary_label:
        fst_copy = FST_COPY.get(primary_label, {})
        if fst_copy:
            tone_section["fitzpatrick"] = {
                "label": primary_label,
                "label_mode": label_mode,
                "labels": labels,
                "title": fst_copy.get("title"),
                "overview": fst_copy.get("overview"),
                "care_focus": fst_copy.get("care_focus"),
                "probs": fst_probs,
            }
    if monk:
        tone_section["monk_tone"] = {
            "label": tone_label,
            "group_name": tone_name,
            "education": MONK_COPY.get(tone_name),
            "probs": monk.get("probs", {}),
        }

    # --- Texture section ---
    texture_section: Dict[str, Any] = {}
    if texture:
        active = _active_textures(texture)
        if active:
            texture_section["items"] = [
                {"tag": tag, "title": copy["title"], "body": copy["body"], "prob": float(texture[tag]["prob"])}
                for tag, copy in active
            ]

    return {
        "summary": summary,
        "tone_section": tone_section,
        "texture_section": texture_section,
        "disclaimer": COSMETIC_DISCLAIMER,
    }
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
|
lib/derm_local.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import io
|
| 2 |
+
from functools import lru_cache
|
| 3 |
+
from typing import Tuple
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import tensorflow as tf
|
| 7 |
+
from PIL import Image
|
| 8 |
+
from huggingface_hub import snapshot_download
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@lru_cache(maxsize=1)
def _load_model() -> tf.types.experimental.GenericFunction:
    """Download (once) and load the Derm Foundation SavedModel.

    Returns the ``serving_default`` signature callable. The ``lru_cache``
    ensures the snapshot download and model load happen at most once per
    process.
    """
    local_dir = snapshot_download("google/derm-foundation")
    saved_model = tf.saved_model.load(local_dir)
    return saved_model.signatures["serving_default"]
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def _encode_png_bytes(img: Image.Image) -> bytes:
    """Serialize a PIL image to PNG-encoded bytes in memory."""
    with io.BytesIO() as sink:
        img.save(sink, format="PNG")
        return sink.getvalue()
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def _make_example(png_bytes: bytes) -> bytes:
    """Wrap PNG bytes in a serialized ``tf.train.Example``.

    The bytes are stored under the ``image/encoded`` feature key, which is
    the input format expected by the Derm Foundation serving signature.
    """
    image_feature = tf.train.Feature(
        bytes_list=tf.train.BytesList(value=[png_bytes])
    )
    example = tf.train.Example(
        features=tf.train.Features(feature={"image/encoded": image_feature})
    )
    return example.SerializeToString()
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def embed_image_path(path: str) -> np.ndarray:
    """Return 1D embedding vector (float32) for an image file path.

    The image is converted to RGB, PNG-encoded, wrapped in a
    ``tf.train.Example``, and passed through the Derm Foundation model's
    serving signature.
    """
    signature = _load_model()
    with Image.open(path) as raw:
        rgb = raw.convert("RGB")
        png_bytes = _encode_png_bytes(rgb)
        serialized = _make_example(png_bytes)
        outputs = signature(inputs=tf.constant([serialized]))
        # Flatten to a 1D float32 vector regardless of the output batch shape.
        return outputs["embedding"].numpy().flatten().astype(np.float32)
|
| 47 |
+
|
lib/full_analysis.py
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Top-level orchestrator to run both cosmetic and concern analysis.
|
| 3 |
+
|
| 4 |
+
Exposes helpers for analyzing a single image (current API) and for aggregating
|
| 5 |
+
results across multiple images from the same person/session.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from typing import Any, Dict, List
|
| 9 |
+
|
| 10 |
+
from config.hudson_products import load_product_config
|
| 11 |
+
from lib.cosmetic_inference import analyze_cosmetic_image
|
| 12 |
+
from lib.cosmetic_reporting import build_cosmetic_report
|
| 13 |
+
from lib.concern_inference import analyze_image, analyze_image_report
|
| 14 |
+
from lib.recommendations import build_routine
|
| 15 |
+
from lib.reporting import build_report
|
| 16 |
+
from lib.session_aggregation import aggregate_concern_probs, aggregate_fitzpatrick_probs
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def analyze_full_image(
    image_path: str,
    cosmetic_models_dir: str = "models/cosmetic",
    concern_models_dir: str = "models",
) -> Dict[str, Any]:
    """
    Run cosmetic attribute analysis and concern+routine analysis on one image.

    Returns a combined JSON-serializable structure:

        {
            "cosmetic": { ... },
            "concerns": { ... }
        }

    The cosmetic and concern layers stay logically separate so each can
    also be used on its own.
    """
    cosmetic_section = build_cosmetic_report(
        analyze_cosmetic_image(image_path, models_dir=cosmetic_models_dir)
    )
    concern_section = analyze_image_report(image_path, models_dir=concern_models_dir)

    return {
        "cosmetic": cosmetic_section,
        "concerns": concern_section,
    }
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def analyze_full_session(
    image_paths: List[str],
    cosmetic_models_dir: str = "models/cosmetic",
    concern_models_dir: str = "models",
) -> Dict[str, Any]:
    """
    Run cosmetic and concern analysis on multiple images from the same person
    and aggregate probabilities into a single session-level report.

    Returns the same {"cosmetic": ..., "concerns": ...} shape as
    `analyze_full_image`, so future endpoints can support 2-3 photos per
    analysis without changing the external API.

    Raises:
        ValueError: if `image_paths` is empty.
    """
    if not image_paths:
        raise ValueError("analyze_full_session requires at least one image path")

    # Per-image cosmetic and concern predictions
    cosmetic_preds = [
        analyze_cosmetic_image(path, models_dir=cosmetic_models_dir)
        for path in image_paths
    ]
    concern_preds = [
        analyze_image(path, models_dir=concern_models_dir) for path in image_paths
    ]

    # Aggregate Fitzpatrick probabilities across images.
    fst_mean_probs = aggregate_fitzpatrick_probs(cosmetic_preds)
    if fst_mean_probs:
        fst_items = sorted(fst_mean_probs.items(), key=lambda kv: kv[1], reverse=True)
        agg_fst = {
            "label": fst_items[0][0],
            "probs": fst_mean_probs,
        }
    else:
        agg_fst = None

    # Start from the first image's cosmetic prediction and override FST with
    # the aggregated probabilities.
    cosmetic_base = dict(cosmetic_preds[0]) if cosmetic_preds else {}
    if agg_fst is not None:
        cosmetic_base["fitzpatrick"] = agg_fst

    cosmetic_report = build_cosmetic_report(cosmetic_base)

    # Aggregate concern probabilities and build routine + report using them.
    concern_mean_probs = aggregate_concern_probs(concern_preds)
    if concern_mean_probs:
        # BUGFIX: copy the nested per-concern dicts before overriding their
        # probabilities. The previous shallow `dict(concern_preds[0])` shared
        # the inner dicts, so writing "prob"/"active" silently mutated the
        # per-image predictions in `concern_preds`.
        base_concerns = {tag: dict(info) for tag, info in concern_preds[0].items()}
        for tag, prob in concern_mean_probs.items():
            info = base_concerns.setdefault(tag, {})
            info["prob"] = float(prob)
            # 0.5 matches the activation threshold used by per-image analysis.
            info["active"] = bool(prob >= 0.5)
    else:
        base_concerns = concern_preds[0] if concern_preds else {}

    product_cfg = load_product_config()
    routine = build_routine(base_concerns, product_cfg)
    concern_report = build_report(base_concerns, routine)

    return {
        "cosmetic": cosmetic_report,
        "concerns": concern_report,
    }
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
|
lib/recommendations.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Rule-based product recommendation module.
|
| 3 |
+
|
| 4 |
+
Takes concern classifier output and Hudson product config, and builds a
|
| 5 |
+
simple skincare routine (cleanser, treatment, moisturizer, sunscreen).
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from typing import Any, Dict, List, Tuple
|
| 9 |
+
|
| 10 |
+
from config.hudson_products import PRODUCT_CONFIG, ROUTINE_STEPS, ProductConfig
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# Business-tunable parameters
|
| 14 |
+
MIN_PROB_FOR_CONCERN: float = 0.4
|
| 15 |
+
MAX_PRODUCTS_PER_STEP: int = 1
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def _get_active_concerns(concern_results: Dict[str, Dict[str, Any]]) -> List[Tuple[str, float]]:
|
| 19 |
+
"""Return list of (tag, prob) for active concerns, sorted by prob desc."""
|
| 20 |
+
items: List[Tuple[str, float]] = []
|
| 21 |
+
for tag, info in concern_results.items():
|
| 22 |
+
prob = float(info.get("prob", 0.0))
|
| 23 |
+
if prob >= MIN_PROB_FOR_CONCERN:
|
| 24 |
+
items.append((tag, prob))
|
| 25 |
+
# If nothing passes the threshold, fall back to the single highest-prob concern
|
| 26 |
+
if not items and concern_results:
|
| 27 |
+
tag, info = max(concern_results.items(), key=lambda kv: float(kv[1].get("prob", 0.0)))
|
| 28 |
+
items.append((tag, float(info.get("prob", 0.0))))
|
| 29 |
+
# Sort by probability descending
|
| 30 |
+
items.sort(key=lambda tp: tp[1], reverse=True)
|
| 31 |
+
return items
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def build_routine(
    concern_results: Dict[str, Dict[str, Any]],
    product_config: Dict[str, ProductConfig] | None = None,
) -> Dict[str, Any]:
    """
    Build a simple skincare routine from concern results and product config.

    concern_results: output from analyze_embedding/analyze_image,
        {concern_tag: {"prob": float, "active": bool, ...}}
    product_config: optional override; defaults to ``PRODUCT_CONFIG``.

    Returns:
        {
            "concerns": [("Dry_Sensitive", 0.82), ...],
            "steps": {
                "cleanser": ["moisture_balancing_cleanser"],
                "treatment": ["blemish_age_defense_serum"],
                "moisturizer": ["daily_moisturizer"],
                "sunscreen": ["facial_gel_sunscreen"],
            }
        }
    """
    cfg_map = PRODUCT_CONFIG if product_config is None else product_config

    active_concerns = _get_active_concerns(concern_results)
    prob_by_tag = dict(active_concerns)

    # Score each product by the summed probability of the active concerns it
    # claims to support, bucketed by routine step.
    candidates: Dict[str, List[Tuple[str, float]]] = {step: [] for step in ROUTINE_STEPS}
    for product_id, product in cfg_map.items():
        step = product["step"]
        if step not in ROUTINE_STEPS:
            continue
        score = sum(
            prob_by_tag.get(tag, 0.0) for tag in product.get("supported_concerns", [])
        )
        if score > 0.0:
            candidates[step].append((product_id, score))

    # Keep only the top products per step; ties break on product id for
    # deterministic output.
    selections: Dict[str, List[str]] = {}
    for step in ROUTINE_STEPS:
        ranked = candidates.get(step, [])
        if not ranked:
            continue
        ranked.sort(key=lambda item: (item[1], item[0]), reverse=True)
        selections[step] = [pid for pid, _ in ranked[:MAX_PRODUCTS_PER_STEP]]

    return {
        "concerns": active_concerns,
        "steps": selections,
    }
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
|
lib/reporting.py
ADDED
|
@@ -0,0 +1,215 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Reporting utilities to turn model outputs into a user-friendly skin report.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from typing import Any, Dict, List, Tuple
|
| 6 |
+
|
| 7 |
+
from config.concerns import CONCERN_CONFIG
|
| 8 |
+
from config.hudson_products import PRODUCT_CONFIG, ProductConfig
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# Tunable parameters controlling how the user-facing report is assembled.
# Maximum number of concerns named in the summary paragraph.
MAX_CONCERNS_IN_SUMMARY = 3
# Minimum probability for including a concern section at all.
MIN_PROB_FOR_SECTION = 0.4
# Thresholds for primary / secondary concern logic.
# A concern must reach PRIMARY_MIN_PROB to be treated as the primary one;
# others count as secondary only if they reach SECONDARY_MIN_PROB and trail
# the primary by at most SECONDARY_MAX_GAP.
PRIMARY_MIN_PROB = 0.6
SECONDARY_MIN_PROB = 0.45
SECONDARY_MAX_GAP = 0.15
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def _describe_probability(prob: float) -> str:
|
| 22 |
+
"""Map numeric probability to friendly language."""
|
| 23 |
+
if prob >= 0.8:
|
| 24 |
+
return "strong patterns"
|
| 25 |
+
if prob >= 0.6:
|
| 26 |
+
return "clear patterns"
|
| 27 |
+
if prob >= 0.4:
|
| 28 |
+
return "some features"
|
| 29 |
+
return "subtle hints"
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def _format_concern_body(tag: str, prob: float) -> str:
    """Compose the descriptive paragraph for one concern section.

    Joins the configured description, meaning, and care-focus copy for
    ``tag``, prefixed with an intensity phrase derived from ``prob``.
    Returns "" when no copy is configured for the tag.
    """
    cfg = CONCERN_CONFIG.get(tag, {})
    intensity = _describe_probability(prob)

    pieces: List[str] = []
    for text in (cfg.get("description", ""), cfg.get("what_it_means", "")):
        if text:
            pieces.append(text)
    care_focus = cfg.get("care_focus", "")
    if care_focus:
        pieces.append(f"In terms of care, a good focus is: {care_focus}")

    if not pieces:
        return ""
    intro = f"We see {intensity} that can be seen with this type of skin pattern."
    return " ".join([intro] + pieces)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def _split_primary_secondary(
    active_concerns: List[Tuple[str, float]]
) -> Tuple[List[Tuple[str, float]], List[Tuple[str, float]]]:
    """
    Given (tag, prob) pairs sorted by prob desc, split into primary and
    secondary concerns using the module-level thresholds.
    """
    if not active_concerns:
        return [], []

    # The list is already sorted by prob desc (see recommendations.build_routine),
    # so the first entry is the strongest candidate for "primary".
    top_tag, top_prob = active_concerns[0]
    if top_prob < PRIMARY_MIN_PROB:
        # Nothing strong enough to headline; treat everything as secondary.
        return [], active_concerns

    secondary = [
        (tag, prob)
        for tag, prob in active_concerns[1:]
        if prob >= SECONDARY_MIN_PROB and (top_prob - prob) <= SECONDARY_MAX_GAP
    ]
    return [(top_tag, top_prob)], secondary
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def _build_summary(active_concerns: List[Tuple[str, float]]) -> str:
    """Build the top-level summary paragraph from the active concern list.

    Three cases: no concerns at all, no concern above 0.45 (reassuring
    maintenance message), or up to MAX_CONCERNS_IN_SUMMARY named concerns.
    """
    if not active_concerns:
        return (
            "We analyzed this photo but did not find any strong, consistent patterns. "
            "Your skin may still benefit from gentle daily care and sunscreen, and for any worries "
            "it's always best to speak with a skin professional."
        )

    strongest = max(prob for _, prob in active_concerns)

    # If no concern stands out strongly, lean into a reassuring, maintenance-focused message.
    if strongest < 0.45:
        return (
            "We used an AI-based cosmetic model to review the image you shared and did not see strong patterns "
            "of common skin concerns. That is generally reassuring. "
            "Gentle cleansing, a suitable moisturizer, and daily sunscreen are usually enough to maintain healthy-looking skin. "
            "If anything about your skin is worrying or changing quickly, it's still a good idea to check in with a skin professional."
        )

    phrases: List[str] = []
    for tag, prob in active_concerns[:MAX_CONCERNS_IN_SUMMARY]:
        cfg = CONCERN_CONFIG.get(tag, {})
        title = cfg.get("title", tag.replace("_", " "))
        phrases.append(f"{_describe_probability(prob)} associated with {title.lower()}")

    # Natural-language join: "a", "a and b", or "a, b, and c".
    if len(phrases) == 1:
        concerns_text = phrases[0]
    elif len(phrases) == 2:
        concerns_text = " and ".join(phrases)
    else:
        concerns_text = ", ".join(phrases[:-1]) + f", and {phrases[-1]}"

    return (
        "We used an AI-based cosmetic model to review the image you shared. "
        f"In this photo we see {concerns_text}. "
        "Different skin patterns can overlap, so this is not a diagnosis, but a guide to the kinds of care "
        "that may support your skin."
    )
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def _enrich_routine(
    routine: Dict[str, Any],
    active_concerns: List[Tuple[str, float]],
) -> Dict[str, Any]:
    """Attach per-product rationales to the routine steps.

    For each product id chosen by the recommendation module, look up its
    config, work out which of the user's active concerns it supports, and
    build a "why" sentence (from the product's ``why_template`` when present,
    otherwise from a generic fallback). Returns a copy of ``routine`` whose
    "steps" values are lists of product dicts instead of bare product ids.
    Unknown product ids are silently skipped.
    """
    active_tags = [tag for tag, _ in active_concerns]
    enriched_steps: Dict[str, List[Dict[str, Any]]] = {}

    for step, product_ids in routine.get("steps", {}).items():
        enriched_steps[step] = []
        for pid in product_ids:
            cfg: ProductConfig | None = PRODUCT_CONFIG.get(pid)  # type: ignore[assignment]
            if not cfg:
                # Product id not in config (e.g. stale routine) — skip it.
                continue

            name = cfg["name"]
            portfolio = cfg.get("portfolio") or ""
            supported = cfg.get("supported_concerns", [])
            # Concerns this product supports that the user actually has.
            matched = [t for t in supported if t in active_tags]
            concern_titles = [
                CONCERN_CONFIG.get(t, {}).get("title", t.replace("_", " ")) for t in matched
            ]
            concern_text = ", ".join(concern_titles) if concern_titles else "your overall skin goals"

            # Prefer the product's own template; fall back to a generic sentence.
            template = cfg.get("why_template")
            if template:
                why = template.format(name=name, concerns=concern_text, step=step, portfolio=portfolio)
            else:
                why = (
                    f"Because your photo suggests {concern_text}, we recommend {name}, a {step} "
                    f"from our {portfolio} range to support this area."
                ).strip()

            enriched_steps[step].append(
                {
                    "id": cfg["id"],
                    "name": name,
                    "step": step,
                    # Optional product image filename (e.g. "daily_moisturizer.png").
                    # The client can map this to an actual asset or URL.
                    "image_name": cfg.get("image_name"),
                    "why": why,
                }
            )

    # Shallow copy so the caller's routine dict keeps its original "steps".
    enriched = dict(routine)
    enriched["steps"] = enriched_steps
    return enriched
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def build_report(
    concern_results: Dict[str, Dict[str, Any]],
    routine: Dict[str, Any],
) -> Dict[str, Any]:
    """
    Assemble a full user-facing report from concerns and routine.

    Returns a dict with "summary", "concern_sections", "routine" (enriched
    with per-product rationales), and "disclaimer".
    """
    # The recommendation module already computed the sorted active concerns.
    active_concerns: List[Tuple[str, float]] = routine.get("concerns", [])

    sections: List[Dict[str, Any]] = []
    for tag, prob in active_concerns:
        if prob < MIN_PROB_FOR_SECTION:
            continue
        cfg = CONCERN_CONFIG.get(tag, {})
        sections.append(
            {
                "tag": tag,
                "title": cfg.get("title", tag.replace("_", " ")),
                "body": _format_concern_body(tag, prob),
                "prob": prob,
            }
        )

    disclaimer = (
        "This experience is powered by an AI-based cosmetic skin analysis and is not a medical diagnosis. "
        "It cannot rule out skin disease or serious conditions. If you notice pain, bleeding, rapidly changing areas, "
        "or anything that worries you, please consult a dermatologist or healthcare professional."
    )

    return {
        "summary": _build_summary(active_concerns),
        "concern_sections": sections,
        "routine": _enrich_routine(routine, active_concerns),
        "disclaimer": disclaimer,
    }
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
|
lib/session_aggregation.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Utilities for aggregating per-image predictions into a session-level summary.
|
| 3 |
+
|
| 4 |
+
We keep this separate so both cosmetic and concern reporting can reuse it
|
| 5 |
+
without changing the external API shape.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from typing import Any, Dict, Iterable, List, Tuple
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def aggregate_fitzpatrick_probs(predictions: Iterable[Dict[str, Any]]) -> Dict[str, float]:
    """
    Average Fitzpatrick class probabilities across multiple images.

    Each element in `predictions` is expected to be a dict with a
    "fitzpatrick" key containing a "probs" mapping from label to prob.
    Entries without that structure are skipped.

    The label set is the union across all images; a label missing from an
    individual image contributes probability 0.0 for that image. (Previously
    only the first image's labels were used, silently dropping any label
    that appeared only in later images.)
    """
    probs_list: List[Dict[str, float]] = []
    for pred in predictions:
        fst = pred.get("fitzpatrick")
        if not fst:
            continue
        probs = fst.get("probs")
        if isinstance(probs, dict):
            probs_list.append({str(k): float(v) for k, v in probs.items()})

    if not probs_list:
        return {}

    # Union of all labels so no class observed in any image is dropped.
    labels = sorted({label for p in probs_list for label in p})
    arr = np.array(
        [[p.get(label, 0.0) for label in labels] for p in probs_list], dtype=float
    )
    mean = arr.mean(axis=0)
    return {label: float(prob) for label, prob in zip(labels, mean)}
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def aggregate_concern_probs(
    per_image_concerns: Iterable[Dict[str, Dict[str, Any]]]
) -> Dict[str, float]:
    """
    Average concern probabilities across multiple images.

    Each element is expected to be a dict mapping
    concern tag -> {"prob": float, ...}. A tag's average is taken over the
    images where that tag appears; a missing "prob" counts as 0.0.
    """
    totals: Dict[str, float] = {}
    occurrences: Dict[str, int] = {}

    for image_concerns in per_image_concerns:
        for tag, info in image_concerns.items():
            value = float(info.get("prob", 0.0))
            totals[tag] = totals.get(tag, 0.0) + value
            occurrences[tag] = occurrences.get(tag, 0) + 1

    if not totals:
        return {}

    return {tag: total / occurrences[tag] for tag, total in totals.items()}
|
| 60 |
+
|
| 61 |
+
|
models/cosmetic/fst_logreg.joblib
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:24dbc8e52b44abd36a265dda6cbd9a7912c6abfaebd9e33436121833a3d6f68b
|
| 3 |
+
size 295863
|
models/cosmetic/fst_mlp_model.joblib
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0b5d723106668c44839e96fc8ae916d4939bae29ff54a45cf7e2c4f6b40a51b5
|
| 3 |
+
size 19290165
|
models/cosmetic/fst_mlp_scaler.joblib
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:12b2dc0edd9d73b3dfb350e0bc84bddb8a49884206619b5dc228c6271f32d980
|
| 3 |
+
size 148071
|
models/cosmetic/fst_scaler.joblib
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:12b2dc0edd9d73b3dfb350e0bc84bddb8a49884206619b5dc228c6271f32d980
|
| 3 |
+
size 148071
|
models/cosmetic/monk_logreg.joblib
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e2c8b614d672f458d9c04da0d53cdee13c5071351d641316723fa8f97e8cd840
|
| 3 |
+
size 492535
|
models/cosmetic/monk_scaler.joblib
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:32630e298800bda3afeac3fdb9b06a6551d125660521928f48add653506e42e0
|
| 3 |
+
size 148071
|
models/logreg.joblib
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fd74e16ed27584bb8434594497f7e7fa366b23995b8f4e7d247164952cf25885
|
| 3 |
+
size 148367
|
models/scaler.joblib
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ca49b11510cd10cb49ce19495b4f336844806b1d5759c12c1d27ae16d94611ce
|
| 3 |
+
size 148071
|
models/scin_concerns_logreg.joblib
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:20413c36ac4eb49951dca32dc8a4cedbbaa9484d898b7c6e6974775f2c213bbd
|
| 3 |
+
size 298209
|
models/scin_concerns_scaler.joblib
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:708aec9bd47579e8192c0944336b86210c65de4143607928fa21ad0536e22491
|
| 3 |
+
size 148071
|
requirements.txt
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi
|
| 2 |
+
uvicorn[standard]
|
| 3 |
+
python-multipart
|
| 4 |
+
Pillow
|
| 5 |
+
numpy
|
| 6 |
+
scikit-learn
|
| 7 |
+
joblib
|
| 8 |
+
tensorflow
|
| 9 |
+
huggingface_hub
|
| 10 |
+
requests
|
| 11 |
+
python-dotenv
|
| 12 |
+
|
| 13 |
+
|