#!/usr/bin/env python3
"""
Test script to verify LinguaCustodia v1.0 model configurations.
This should be deployed to HuggingFace Spaces or Scaleway to test actual model capabilities.
"""
import json
import requests
from typing import Dict, Any, Optional

def get_model_config_from_hf(model_name: str) -> Optional[Dict[str, Any]]:
    """Get model configuration from HuggingFace Hub."""
    try:
        url = f"https://huggingface.co/{model_name}/raw/main/config.json"
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        return response.json()
    except Exception as e:
        print(f"Error fetching config for {model_name}: {e}")
        return None
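
# For reference, a fetched config.json typically includes fields like these
# (illustrative values, not taken from any specific LinguaCustodia model):
# {
#   "architectures": ["LlamaForCausalLM"],
#   "model_type": "llama",
#   "max_position_embeddings": 131072,
#   ...
# }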

def extract_context_length(config: Dict[str, Any]) -> Optional[int]:
    """Extract context length from model configuration."""
    context_params = [
        "max_position_embeddings",
        "n_positions",
        "max_sequence_length",
        "context_length",
        "max_context_length",
    ]
    for param in context_params:
        if param in config:
            value = config[param]
            if isinstance(value, dict) and "max_position_embeddings" in value:
                return value["max_position_embeddings"]
            elif isinstance(value, int):
                return value
    return None
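
# Example behaviour on hypothetical config dicts:
#   extract_context_length({"max_position_embeddings": 131072}) -> 131072
#   extract_context_length({"n_positions": 2048})               -> 2048
#   extract_context_length({"hidden_size": 4096})               -> None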

def test_lingua_custodia_models():
    """Test all LinguaCustodia v1.0 models."""
    models_to_test = [
        "LinguaCustodia/llama3.1-8b-fin-v1.0",
        "LinguaCustodia/qwen3-8b-fin-v1.0",
        "LinguaCustodia/qwen3-32b-fin-v1.0",
        "LinguaCustodia/llama3.1-70b-fin-v1.0",
        "LinguaCustodia/gemma3-12b-fin-v1.0",
    ]
    results = {}
    print("Testing LinguaCustodia v1.0 Models")
    print("=" * 50)
    for model_name in models_to_test:
        print(f"\nTesting: {model_name}")
        config = get_model_config_from_hf(model_name)
        if config:
            context_length = extract_context_length(config)
            # Also check for other relevant config fields
            model_type = config.get("model_type", "unknown")
            architectures = config.get("architectures", [])
            results[model_name] = {
                "context_length": context_length,
                "model_type": model_type,
                "architectures": architectures,
                "config_available": True,
                "raw_config": config,
            }
            if context_length:
                print(f"  Context Length: {context_length:,} tokens")
            else:
                print("  Context Length: Unknown")
            print(f"  Model Type: {model_type}")
            print(f"  Architectures: {architectures}")
        else:
            results[model_name] = {
                "context_length": None,
                "config_available": False,
            }
            print("  Failed to fetch configuration")
    return results
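
# Shape of the dict returned above (illustrative entry; "raw_config" holds the
# full config.json as fetched, and "context_length" may be None):
# {
#   "LinguaCustodia/llama3.1-8b-fin-v1.0": {
#       "context_length": 131072,
#       "model_type": "llama",
#       "architectures": ["LlamaForCausalLM"],
#       "config_available": True,
#       "raw_config": {...}
#   },
#   ...
# }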

def main():
    """Main test function."""
    results = test_lingua_custodia_models()
    print("\n" + "=" * 50)
    print("SUMMARY")
    print("=" * 50)
    for model_name, data in results.items():
        context_length = data.get("context_length")
        if context_length:
            print(f"{model_name}: {context_length:,} tokens")
        else:
            print(f"{model_name}: Unknown context length")
    # Save full results, including raw configs, for later inspection
    with open("lingua_custodia_test_results.json", "w") as f:
        json.dump(results, f, indent=2)
    print("\nDetailed results saved to: lingua_custodia_test_results.json")
    # Validate against our current configurations
    print("\n" + "=" * 50)
    print("VALIDATION AGAINST CURRENT CONFIG")
    print("=" * 50)
    expected_contexts = {
        "LinguaCustodia/llama3.1-8b-fin-v1.0": 128000,
        "LinguaCustodia/qwen3-8b-fin-v1.0": 32768,
        "LinguaCustodia/qwen3-32b-fin-v1.0": 32768,
        "LinguaCustodia/llama3.1-70b-fin-v1.0": 128000,
        "LinguaCustodia/gemma3-12b-fin-v1.0": 8192,
    }
    for model_name, expected in expected_contexts.items():
        actual = results.get(model_name, {}).get("context_length")
        if actual:
            if actual == expected:
                print(f"✅ {model_name}: {actual:,} tokens (CORRECT)")
            else:
                print(f"❌ {model_name}: {actual:,} tokens (EXPECTED {expected:,})")
        else:
            print(f"⚠️ {model_name}: Unknown (EXPECTED {expected:,})")


if __name__ == "__main__":
    main()