#!/usr/bin/env python3
"""
Local evaluation script for NEBULA-X before submission to the leaderboard
Francisco Angulo de Lafuente - Agnuxo
"""
import json
import random
import re
import time
from typing import Any, Dict, Optional

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from datasets import load_dataset


class LocalBenchmarkRunner:
    """Runs local benchmarks for pre-evaluation"""

    def __init__(self, model_name: str = "Agnuxo/NEBULA-X"):
        self.model_name = model_name
        self.model = None
        self.tokenizer = None
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

    def load_model(self):
        """Loads the model and tokenizer"""
        print(f"🔄 Loading model {self.model_name}...")
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                # float16 only makes sense on GPU; fall back to float32 on CPU
                torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                device_map="auto" if torch.cuda.is_available() else None
            )
            # Set a pad token if the tokenizer does not define one
            if self.tokenizer.pad_token is None:
                self.tokenizer.pad_token = self.tokenizer.eos_token
            print(f"✅ Model loaded on {self.device}")
            return True
        except Exception as e:
            print(f"❌ Error loading model: {e}")
            return False

    def generate_response(self, prompt: str, max_length: int = 100) -> str:
        """Generates a response from the model"""
        inputs = self.tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
        if torch.cuda.is_available():
            inputs = {k: v.to(self.device) for k, v in inputs.items()}
        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                max_length=inputs['input_ids'].shape[1] + max_length,
                do_sample=True,
                temperature=0.7,
                top_p=0.9,
                pad_token_id=self.tokenizer.eos_token_id,
                eos_token_id=self.tokenizer.eos_token_id
            )
        response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        # Keep only the newly generated text
        response = response[len(prompt):].strip()
        return response
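
    # Example (hypothetical call): generate_response("Question: What is 2+2?\nAnswer:", max_length=10)
    # returns only the sampled continuation (for example "4"), because the prompt prefix is sliced off
    # and whitespace is stripped above.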

    def evaluate_mmlu_sample(self, n_samples: int = 50) -> Dict[str, float]:
        """Evaluates a sample of MMLU"""
        print(f"📚 Evaluating MMLU (sample of {n_samples})...")
        try:
            # Load a sample of MMLU
            dataset = load_dataset("cais/mmlu", "all", split="test")
            sample = random.sample(list(dataset), min(n_samples, len(dataset)))
            correct = 0
            total = 0
            for item in sample:
                question = item['question']
                choices = item['choices']
                correct_answer = item['answer']
                # Format the question
                prompt = f"Question: {question}\n"
                for i, choice in enumerate(choices):
                    prompt += f"{chr(65+i)}. {choice}\n"
                prompt += "Answer:"
                # Generate a response
                response = self.generate_response(prompt, max_length=10)
                # Extract the answer letter
                predicted_answer = None
                for char in response.upper():
                    if char in 'ABCD':
                        predicted_answer = ord(char) - ord('A')
                        break
                if predicted_answer == correct_answer:
                    correct += 1
                total += 1
                if total % 10 == 0:
                    print(f"  Progress: {total}/{n_samples}")
            accuracy = correct / total if total > 0 else 0
            print(f"✅ MMLU Accuracy: {accuracy:.2%} ({correct}/{total})")
            return {"mmlu_accuracy": accuracy, "mmlu_correct": correct, "mmlu_total": total}
        except Exception as e:
            print(f"❌ Error in MMLU: {e}")
            return {"mmlu_accuracy": 0.0, "mmlu_correct": 0, "mmlu_total": 0}

    def evaluate_gsm8k_sample(self, n_samples: int = 30) -> Dict[str, float]:
        """Evaluates a sample of GSM8K"""
        print(f"🔢 Evaluating GSM8K (sample of {n_samples})...")
        try:
            # Load a sample of GSM8K
            dataset = load_dataset("gsm8k", "main", split="test")
            sample = random.sample(list(dataset), min(n_samples, len(dataset)))
            correct = 0
            total = 0
            for item in sample:
                question = item['question']
                correct_answer = item['answer']
                # Extract the number from the reference answer
                correct_number = self.extract_number_from_answer(correct_answer)
                # Format the question
                prompt = f"Question: {question}\nAnswer:"
                # Generate a response
                response = self.generate_response(prompt, max_length=150)
                # Extract the number from the generated response
                predicted_number = self.extract_number_from_text(response)
                if predicted_number is not None and abs(predicted_number - correct_number) < 1e-6:
                    correct += 1
                total += 1
                if total % 5 == 0:
                    print(f"  Progress: {total}/{n_samples}")
            accuracy = correct / total if total > 0 else 0
            print(f"✅ GSM8K Accuracy: {accuracy:.2%} ({correct}/{total})")
            return {"gsm8k_accuracy": accuracy, "gsm8k_correct": correct, "gsm8k_total": total}
        except Exception as e:
            print(f"❌ Error in GSM8K: {e}")
            return {"gsm8k_accuracy": 0.0, "gsm8k_correct": 0, "gsm8k_total": 0}

    def evaluate_instruction_following(self, n_samples: int = 20) -> Dict[str, float]:
        """Evaluates instruction-following ability (simulating IFEval)"""
        print(f"📋 Evaluating instruction following (sample of {n_samples})...")
        # Test instructions
        test_instructions = [
            {
                "instruction": "Write exactly 3 sentences about artificial intelligence.",
                "checker": lambda x: len([s for s in x.split('.') if s.strip()]) == 3
            },
            {
                "instruction": "List 5 colors, each on a new line, starting with the word 'Color:'",
                "checker": lambda x: x.count('\n') >= 4 and x.count('Color:') >= 5
            },
            {
                "instruction": "Write a paragraph that contains exactly the word 'important' three times.",
                "checker": lambda x: x.lower().count('important') == 3
            },
            {
                "instruction": "Write a response that starts with 'First,' and ends with 'Finally.'",
                "checker": lambda x: x.strip().startswith('First,') and x.strip().endswith('Finally.')
            },
            {
                "instruction": "Write exactly 50 words about technology.",
                "checker": lambda x: 45 <= len(x.split()) <= 55
            }
        ]
        correct = 0
        total = 0
        for i in range(min(n_samples, len(test_instructions) * 4)):
            instruction_item = test_instructions[i % len(test_instructions)]
            instruction = instruction_item["instruction"]
            checker = instruction_item["checker"]
            prompt = f"Instruction: {instruction}\nResponse:"
            response = self.generate_response(prompt, max_length=200)
            if checker(response):
                correct += 1
            total += 1
            if total % 5 == 0:
                print(f"  Progress: {total}/{n_samples}")
        accuracy = correct / total if total > 0 else 0
        print(f"✅ Instruction Following Accuracy: {accuracy:.2%} ({correct}/{total})")
        return {"instruction_accuracy": accuracy, "instruction_correct": correct, "instruction_total": total}

    def evaluate_basic_reasoning(self, n_samples: int = 15) -> Dict[str, float]:
        """Evaluates basic reasoning (simulating BBH/MuSR)"""
        print(f"🧠 Evaluating basic reasoning (sample of {n_samples})...")
        reasoning_tasks = [
            {
                "question": "If it takes 5 machines 5 minutes to make 5 widgets, how many minutes does it take 100 machines to make 100 widgets?",
                "answer": "5",
                "answer_number": 5
            },
            {
                "question": "A man lives on the 20th floor. Every morning he takes the elevator down to ground floor. When he comes home, he takes the elevator to the 10th floor and walks the rest, except on rainy days when he takes the elevator all the way. Why?",
                "answer": "short",
                "answer_number": None
            },
            {
                "question": "What comes next in the sequence: 2, 6, 12, 20, 30, ?",
                "answer": "42",
                "answer_number": 42
            }
        ]
        correct = 0
        total = 0
        for i in range(n_samples):
            task = reasoning_tasks[i % len(reasoning_tasks)]
            question = task["question"]
            expected_answer = task["answer"]
            expected_number = task.get("answer_number")
            prompt = f"Question: {question}\nThink step by step.\nAnswer:"
            response = self.generate_response(prompt, max_length=150)
            # Check the answer
            if expected_number is not None:
                predicted_number = self.extract_number_from_text(response)
                if predicted_number is not None and abs(predicted_number - expected_number) < 1e-6:
                    correct += 1
            else:
                if expected_answer.lower() in response.lower():
                    correct += 1
            total += 1
        accuracy = correct / total if total > 0 else 0
        print(f"✅ Basic Reasoning Accuracy: {accuracy:.2%} ({correct}/{total})")
        return {"reasoning_accuracy": accuracy, "reasoning_correct": correct, "reasoning_total": total}

    def extract_number_from_answer(self, answer_text: str) -> float:
        """Extracts the number from a GSM8K reference answer"""
        # GSM8K answers end with "#### <number>", so the last number in the text
        # is the final answer; strip thousands separators like "1,000" first.
        numbers = re.findall(r'-?\d+\.?\d*', answer_text.replace(',', ''))
        if numbers:
            try:
                return float(numbers[-1])  # last number found
            except ValueError:
                return 0.0
        return 0.0

    def extract_number_from_text(self, text: str) -> Optional[float]:
        """Extracts a number from generated text, or None if there is none"""
        numbers = re.findall(r'-?\d+\.?\d*', text.replace(',', ''))
        if numbers:
            try:
                return float(numbers[-1])
            except ValueError:
                return None
        return None
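
    # Extraction examples for the helpers above:
    #   extract_number_from_answer("... The total is 1,250. #### 1,250") -> 1250.0
    #   extract_number_from_text("So the answer is 42 apples.")          -> 42.0
    #   extract_number_from_text("no digits here")                       -> None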

    def run_full_evaluation(self) -> Dict[str, Any]:
        """Runs the full evaluation"""
        print("🌌 NEBULA-X Local Benchmark Evaluation")
        print("=" * 50)
        if not self.load_model():
            return {"error": "Failed to load model"}
        start_time = time.time()
        results = {}
        # Run the benchmarks
        try:
            results.update(self.evaluate_mmlu_sample(50))
            results.update(self.evaluate_gsm8k_sample(30))
            results.update(self.evaluate_instruction_following(20))
            results.update(self.evaluate_basic_reasoning(15))
            # Compute the overall score
            scores = [
                results.get("mmlu_accuracy", 0),
                results.get("gsm8k_accuracy", 0),
                results.get("instruction_accuracy", 0),
                results.get("reasoning_accuracy", 0)
            ]
            overall_score = sum(scores) / len(scores)
            results["overall_score"] = overall_score
            results["evaluation_time"] = time.time() - start_time
            # Print a summary
            print("\n📊 RESULTS SUMMARY")
            print("=" * 30)
            print(f"MMLU Accuracy: {results['mmlu_accuracy']:.2%}")
            print(f"GSM8K Accuracy: {results['gsm8k_accuracy']:.2%}")
            print(f"Instruction Following: {results['instruction_accuracy']:.2%}")
            print(f"Basic Reasoning: {results['reasoning_accuracy']:.2%}")
            print(f"Overall Score: {overall_score:.2%}")
            print(f"Evaluation Time: {results['evaluation_time']:.1f}s")
            # Save the results
            with open('local_benchmark_results.json', 'w') as f:
                json.dump(results, f, indent=2)
            print("\n💾 Results saved to: local_benchmark_results.json")
            # Prediction for the leaderboard
            self.predict_leaderboard_performance(results)
            return results
        except Exception as e:
            print(f"❌ Error during evaluation: {e}")
            return {"error": str(e)}

    def predict_leaderboard_performance(self, local_results: Dict[str, float]):
        """Predicts performance on the official leaderboard"""
        print("\n🔮 PREDICTION FOR THE OFFICIAL LEADERBOARD")
        print("=" * 40)
        # Correction factor (the official benchmarks are harder)
        correction_factor = 0.7
        predicted_mmlu_pro = local_results.get("mmlu_accuracy", 0) * correction_factor
        predicted_math = local_results.get("gsm8k_accuracy", 0) * 0.5  # MATH is much harder
        predicted_ifeval = local_results.get("instruction_accuracy", 0) * 0.8
        predicted_bbh = local_results.get("reasoning_accuracy", 0) * 0.6
        predicted_gpqa = predicted_mmlu_pro * 0.7  # GPQA is more specialized
        predicted_musr = local_results.get("reasoning_accuracy", 0) * 0.6
        predicted_overall = (predicted_mmlu_pro + predicted_math + predicted_ifeval +
                             predicted_bbh + predicted_gpqa + predicted_musr) / 6
        print(f"IFEval (pred): {predicted_ifeval:.1%}")
        print(f"BBH (pred): {predicted_bbh:.1%}")
        print(f"MATH (pred): {predicted_math:.1%}")
        print(f"GPQA (pred): {predicted_gpqa:.1%}")
        print(f"MuSR (pred): {predicted_musr:.1%}")
        print(f"MMLU-PRO (pred): {predicted_mmlu_pro:.1%}")
        print(f"Overall Score (pred): {predicted_overall:.1%}")
        # Recommendations
        print("\n💡 RECOMMENDATIONS:")
        if predicted_overall < 0.15:
            print("- The model needs significant improvements")
            print("- Consider pre-training on task-specific datasets")
            print("- Increase the model size if possible")
        elif predicted_overall < 0.25:
            print("- Baseline performance expected")
            print("- Good for demonstrating architectural concepts")
            print("- Consider task-specific fine-tuning")
        else:
            print("- Competitive performance expected!")
            print("- A good candidate for the leaderboard")


def main():
    """Main entry point"""
    runner = LocalBenchmarkRunner()
    results = runner.run_full_evaluation()
    if "error" not in results:
        print("\n🎯 Local evaluation completed!")
        print("📋 Next step: run 'python prepare_for_leaderboard.py'")
        print("🚀 Then submit to the official leaderboard!")
    else:
        print(f"\n❌ Evaluation failed: {results['error']}")


if __name__ == "__main__":
    main()
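
# Example (hypothetical): evaluate a different checkpoint without editing main(),
# e.g. from a Python shell; "Agnuxo/NEBULA-X-mini" is a placeholder model id:
#   runner = LocalBenchmarkRunner(model_name="Agnuxo/NEBULA-X-mini")
#   runner.run_full_evaluation()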