#!/usr/bin/env python3
"""
Test script to verify LinguaCustodia v1.0 model configurations.
Deploy to HuggingFace Spaces or Scaleway to test the models' actual capabilities.
"""
import os
import json
import requests
from typing import Dict, Any, Optional

def get_model_config_from_hf(model_name: str) -> Optional[Dict[str, Any]]:
    """Fetch a model's config.json from the HuggingFace Hub."""
    try:
        url = f"https://huggingface.co/{model_name}/raw/main/config.json"
        # Gated or private repos reject anonymous requests; send a bearer token
        # when HF_TOKEN is set in the environment.
        headers = {}
        token = os.environ.get("HF_TOKEN")
        if token:
            headers["Authorization"] = f"Bearer {token}"
        response = requests.get(url, headers=headers, timeout=30)
        response.raise_for_status()
        return response.json()
    except Exception as e:
        print(f"Error fetching config for {model_name}: {e}")
        return None
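
# A possible alternative (untested sketch): the huggingface_hub client handles
# auth and local caching itself, at the cost of an extra dependency:
#
#   from huggingface_hub import hf_hub_download
#   path = hf_hub_download(repo_id=model_name, filename="config.json")
#   config = json.load(open(path))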

def extract_context_length(config: Dict[str, Any]) -> Optional[int]:
    """Extract the context length from a model configuration."""
    context_params = [
        "max_position_embeddings",
        "n_positions",
        "max_sequence_length",
        "context_length",
        "max_context_length",
    ]
    # Multimodal configs (e.g. Gemma 3) may nest the text limits under
    # "text_config", so fall back to that sub-dict when present.
    for space in (config, config.get("text_config", {})):
        for param in context_params:
            if isinstance(space.get(param), int):
                return space[param]
    return None
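
# For example, extract_context_length({"max_position_embeddings": 131072})
# returns 131072.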
def test_lingua_custodia_models():
"""Test all LinguaCustodia v1.0 models."""
models_to_test = [
"LinguaCustodia/llama3.1-8b-fin-v1.0",
"LinguaCustodia/qwen3-8b-fin-v1.0",
"LinguaCustodia/qwen3-32b-fin-v1.0",
"LinguaCustodia/llama3.1-70b-fin-v1.0",
"LinguaCustodia/gemma3-12b-fin-v1.0"
]
results = {}
print("Testing LinguaCustodia v1.0 Models")
print("=" * 50)
for model_name in models_to_test:
print(f"\nTesting: {model_name}")
config = get_model_config_from_hf(model_name)
if config:
context_length = extract_context_length(config)
            # Record other relevant fields from the config
model_type = config.get("model_type", "unknown")
architectures = config.get("architectures", [])
results[model_name] = {
"context_length": context_length,
"model_type": model_type,
"architectures": architectures,
"config_available": True,
"raw_config": config
}
print(f" Context Length: {context_length:,} tokens" if context_length else " Context Length: Unknown")
print(f" Model Type: {model_type}")
print(f" Architectures: {architectures}")
else:
results[model_name] = {
"context_length": None,
"config_available": False
}
print(" Failed to fetch configuration")
return results
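
# Each entry in the dict returned by test_lingua_custodia_models() looks like
# (values illustrative, not fetched results):
#   {"LinguaCustodia/llama3.1-8b-fin-v1.0":
#       {"context_length": 131072, "model_type": "llama", ...}}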
def main():
"""Main test function."""
results = test_lingua_custodia_models()
print("\n" + "=" * 50)
print("SUMMARY")
print("=" * 50)
for model_name, data in results.items():
context_length = data.get("context_length")
if context_length:
print(f"{model_name}: {context_length:,} tokens")
else:
print(f"{model_name}: Unknown context length")
# Save results
with open("lingua_custodia_test_results.json", "w") as f:
json.dump(results, f, indent=2)
print(f"\nDetailed results saved to: lingua_custodia_test_results.json")
# Validate against our current configurations
print("\n" + "=" * 50)
print("VALIDATION AGAINST CURRENT CONFIG")
print("=" * 50)
expected_contexts = {
"LinguaCustodia/llama3.1-8b-fin-v1.0": 128000,
"LinguaCustodia/qwen3-8b-fin-v1.0": 32768,
"LinguaCustodia/qwen3-32b-fin-v1.0": 32768,
"LinguaCustodia/llama3.1-70b-fin-v1.0": 128000,
"LinguaCustodia/gemma3-12b-fin-v1.0": 8192
}
for model_name, expected in expected_contexts.items():
actual = results.get(model_name, {}).get("context_length")
if actual:
if actual == expected:
print(f"✅ {model_name}: {actual:,} tokens (CORRECT)")
else:
print(f"❌ {model_name}: {actual:,} tokens (EXPECTED {expected:,})")
else:
print(f"⚠️ {model_name}: Unknown (EXPECTED {expected:,})")
if __name__ == "__main__":
main()