"use client" import { useState, useEffect, useMemo } from "react" import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card" import { Button } from "@/components/ui/button" import { RadioGroup, RadioGroupItem } from "@/components/ui/radio-group" import { Label } from "@/components/ui/label" import { Textarea } from "@/components/ui/textarea" import { Input } from "@/components/ui/input" import { Badge } from "@/components/ui/badge" import { Separator } from "@/components/ui/separator" import type { CategoryScore } from "@/components/ai-evaluation-dashboard" import { HelpCircle, CheckCircle, Plus, Trash2 } from "lucide-react" import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip" import { SOURCE_TYPES, ADDITIONAL_ASPECTS_SECTION, getFieldPlaceholder, getHint } from "@/lib/schema" import { getBenchmarkQuestions, getProcessQuestions } from '@/lib/schema' import formSchema from '@/schema/evaluation-schema.json' // The detailed per-category and per-question hints, plus recommended placeholders, // are centralized in `lib/category-data.ts`. This component uses the exported // helpers `getHint` and `getFieldPlaceholder` and the question lists. // All benchmark questions share the same input fields; all process questions share the same input fields. // Local types used by this component (kept minimal for readability) export type Source = { id: string url: string description: string sourceType: string benchmarkName?: string metrics?: string score?: string confidenceInterval?: string version?: string taskVariants?: string customFields: Record } export type DocumentationSource = { id: string url: string description: string sourceType: string documentType?: string title?: string author?: string organization?: string date?: string customFields: Record } export type CategoryEvaluationProps = { category: { id: string; name: string; description: string; type: string; detailedGuidance?: string } score?: CategoryScore | null onScoreUpdate: (score: CategoryScore) => void onSaveDetailed?: (categoryId: string, data: any) => void } export function CategoryEvaluation({ category, score, onScoreUpdate, onSaveDetailed }: CategoryEvaluationProps) { const [benchmarkAnswers, setBenchmarkAnswers] = useState>({}) const [processAnswers, setProcessAnswers] = useState>({}) const [benchmarkSources, setBenchmarkSources] = useState>({}) const [processSources, setProcessSources] = useState>({}) const [additionalAspects, setAdditionalAspects] = useState("") const [naExplanations, setNaExplanations] = useState>({}) useEffect(() => { if (score) { // This would be populated from saved data in a real implementation // For now, we'll calculate based on the scores } }, [score]) const addSource = (questionId: string, section: "benchmark" | "process") => { if (section === "benchmark") { const newId = (globalThis.crypto && (globalThis.crypto as any).randomUUID) ? (globalThis.crypto as any).randomUUID() : Date.now().toString() const newSource: Source = { id: newId, url: "", description: "", sourceType: "internal", benchmarkName: "", metrics: "", score: "", confidenceInterval: "", version: "", taskVariants: "", customFields: {}, } setBenchmarkSources((prev) => ({ ...prev, [questionId]: [...(prev[questionId] || []), newSource], })) } else { const newId = (globalThis.crypto && (globalThis.crypto as any).randomUUID) ? 
  const removeSource = (questionId: string, sourceId: string, section: "benchmark" | "process") => {
    if (section === "benchmark") {
      setBenchmarkSources((prev) => ({
        ...prev,
        [questionId]: (prev[questionId] || []).filter((s) => s.id !== sourceId),
      }))
    } else {
      setProcessSources((prev) => ({
        ...prev,
        [questionId]: (prev[questionId] || []).filter((s) => s.id !== sourceId),
      }))
    }
  }

  const updateSource = (
    questionId: string,
    sourceId: string,
    field: string,
    value: string,
    section: "benchmark" | "process",
  ) => {
    if (section === "benchmark") {
      setBenchmarkSources((prev) => ({
        ...prev,
        [questionId]: (prev[questionId] || []).map((source) =>
          source.id === sourceId ? { ...source, [field]: value } : source,
        ),
      }))
    } else {
      setProcessSources((prev) => ({
        ...prev,
        [questionId]: (prev[questionId] || []).map((source) =>
          source.id === sourceId ? { ...source, [field]: value } : source,
        ),
      }))
    }
  }

  const updateSourceCustomField = (
    questionId: string,
    sourceId: string,
    fieldType: string,
    value: string,
    section: "benchmark" | "process",
  ) => {
    if (section === "benchmark") {
      setBenchmarkSources((prev) => ({
        ...prev,
        [questionId]: (prev[questionId] || []).map((source) =>
          source.id === sourceId
            ? { ...source, customFields: { ...source.customFields, [fieldType]: value } }
            : source,
        ),
      }))
    } else {
      setProcessSources((prev) => ({
        ...prev,
        [questionId]: (prev[questionId] || []).map((source) =>
          source.id === sourceId
            ? { ...source, customFields: { ...source.customFields, [fieldType]: value } }
            : source,
        ),
      }))
    }
  }

  const currentScore = useMemo(() => {
    const totalBenchmarkQuestions = getBenchmarkQuestions().length
    const totalProcessQuestions = getProcessQuestions().length
    const totalQuestions = totalBenchmarkQuestions + totalProcessQuestions

    const benchmarkYesCount = Object.values(benchmarkAnswers).filter((answer) => answer === "yes").length
    const processYesCount = Object.values(processAnswers).filter((answer) => answer === "yes").length
    const benchmarkNaCount = Object.values(benchmarkAnswers).filter((answer) => answer === "na").length
    const processNaCount = Object.values(processAnswers).filter((answer) => answer === "na").length
    const naCount = benchmarkNaCount + processNaCount
    const totalYes = benchmarkYesCount + processYesCount

    // Denominator = total questions in the category minus N/A answers.
    const totalApplicable = Math.max(0, totalQuestions - naCount)
    const scorePercentage = totalApplicable > 0 ? totalYes / totalApplicable : 0

    let status: CategoryScore["status"]
    if (scorePercentage >= 0.8) status = "strong"
    else if (scorePercentage >= 0.6) status = "adequate"
    else if (scorePercentage >= 0.4) status = "weak"
    else status = "insufficient"

    return {
      benchmarkScore: benchmarkYesCount,
      processScore: processYesCount,
      totalScore: totalYes,
      status,
      totalQuestions,
      totalApplicable,
      naCount,
    }
  }, [benchmarkAnswers, processAnswers])
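  // Worked example of the scoring rule above (counts are illustrative):
  // with 12 questions total, 2 answered "na" and 7 answered "yes",
  // totalApplicable = 12 - 2 = 10 and scorePercentage = 7 / 10 = 0.7,
  // which falls in the [0.6, 0.8) band and yields status "adequate".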
  const handleAnswerChange = (questionId: string, value: string, section: "benchmark" | "process") => {
    if (section === "benchmark") {
      setBenchmarkAnswers((prev) => ({ ...prev, [questionId]: value }))
      // Sources only make sense for "yes" answers; clear them otherwise.
      if (value !== "yes") {
        setBenchmarkSources((prev) => ({ ...prev, [questionId]: [] }))
      }
      if (value !== "na") {
        setNaExplanations((prev) => {
          const newExplanations = { ...prev }
          delete newExplanations[questionId]
          return newExplanations
        })
      }
    } else {
      setProcessAnswers((prev) => ({ ...prev, [questionId]: value }))
      if (value !== "yes") {
        setProcessSources((prev) => ({ ...prev, [questionId]: [] }))
      }
      if (value !== "na") {
        setNaExplanations((prev) => {
          const newExplanations = { ...prev }
          delete newExplanations[questionId]
          return newExplanations
        })
      }
    }
  }

  const handleNaExplanationChange = (questionId: string, explanation: string) => {
    setNaExplanations((prev) => ({ ...prev, [questionId]: explanation }))
  }

  const handleSave = () => {
    const allAnswers = { ...benchmarkAnswers, ...processAnswers }
    const missingExplanations = Object.entries(allAnswers)
      .filter(([_, answer]) => answer === "na")
      .filter(([questionId]) => !naExplanations[questionId]?.trim())
      .map(([questionId]) => questionId)

    if (missingExplanations.length > 0) {
      alert(
        `Please provide explanations for why the following questions are not applicable: ${missingExplanations.join(", ")}`,
      )
      return
    }

    console.log("[v0] Saving category evaluation")
    const detailed = {
      benchmarkAnswers,
      processAnswers,
      benchmarkSources,
      processSources,
      additionalAspects,
      score: currentScore,
    }
    console.log("[v0] Calling onSaveDetailed with:", detailed)
    onSaveDetailed?.(category.id, detailed)
    console.log("[v0] Calling onScoreUpdate with:", currentScore)
    onScoreUpdate(currentScore)
  }
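  // Illustrative shape of the `detailed` payload handed to onSaveDetailed
  // (question ids and values are hypothetical):
  //   {
  //     benchmarkAnswers: { "benchmark-q1": "yes", "benchmark-q2": "na" },
  //     processAnswers: { "process-q1": "no" },
  //     benchmarkSources: { "benchmark-q1": [/* Source[] */] },
  //     processSources: {},
  //     additionalAspects: "",
  //     score: currentScore,
  //   }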
  const isComplete =
    Object.keys(benchmarkAnswers).length + Object.keys(processAnswers).length ===
    getBenchmarkQuestions().length + getProcessQuestions().length

  return (
    <TooltipProvider>
      <Card>
        <CardHeader>
          <div className="flex items-start justify-between">
            <div>
              <CardTitle className="flex items-center gap-2">
                {category.name}
                <Badge variant="outline">{category.type}</Badge>
              </CardTitle>
              <CardDescription>{category.description}</CardDescription>
            </div>
            {isComplete && (
              <div className="flex items-center gap-2">
                <CheckCircle className="h-5 w-5 text-green-600" />
                <span className="text-sm font-medium">
                  Score: {currentScore.totalScore}/{currentScore.totalApplicable || currentScore.totalQuestions}
                </span>
                <Badge>{currentScore.status.charAt(0).toUpperCase() + currentScore.status.slice(1)}</Badge>
              </div>
            )}
          </div>
        </CardHeader>
        <CardContent className="space-y-6">
          <div>
            <h4 className="font-medium">Source Types</h4>
            <ul className="mt-2 space-y-1 text-sm text-muted-foreground">
              {Object.entries(SOURCE_TYPES).map(([key, type]) => (
                <li key={key}>
                  <span className="font-medium">{type.label}:</span> {type.description}
                </li>
              ))}
            </ul>
          </div>

          <Separator />

          <div>
            <h4 className="font-medium">Evaluation Guidance</h4>
            <p className="mt-2 text-sm text-muted-foreground">
              Note: The benchmarks and evaluations listed below are suggested examples, not exhaustive
              requirements. You may use other relevant benchmarks and evaluation methods appropriate
              for your system.
            </p>
            {category.detailedGuidance && <p className="mt-2 text-sm">{category.detailedGuidance}</p>}
          </div>

          <Separator />

          <div className="space-y-4">
            <div>
              <h4 className="font-medium">Part A: Benchmark & Testing Evaluation</h4>
              <p className="text-sm text-muted-foreground">
                Quantitative assessment through standardized tests and measurements ({currentScore.benchmarkScore}/6)
              </p>
            </div>

            {getBenchmarkQuestions().map((question) => (
              <div key={question.id} className="space-y-2">
                <div className="flex items-center gap-2">
                  {/* `question.text` and the radio item labels below are assumed names;
                      answer values "yes"/"no"/"na" match the handlers above */}
                  <Label htmlFor={question.id}>{question.text}</Label>
                  <Tooltip>
                    <TooltipTrigger asChild>
                      <HelpCircle className="h-4 w-4 text-muted-foreground" />
                    </TooltipTrigger>
                    <TooltipContent>{question.tooltip}</TooltipContent>
                  </Tooltip>
                </div>
                <RadioGroup
                  value={benchmarkAnswers[question.id] || ""}
                  onValueChange={(value) => handleAnswerChange(question.id, value, "benchmark")}
                >
                  <div className="flex items-center gap-2">
                    <RadioGroupItem value="yes" id={`${question.id}-yes`} />
                    <Label htmlFor={`${question.id}-yes`}>Yes</Label>
                  </div>
                  <div className="flex items-center gap-2">
                    <RadioGroupItem value="no" id={`${question.id}-no`} />
                    <Label htmlFor={`${question.id}-no`}>No</Label>
                  </div>
                  <div className="flex items-center gap-2">
                    <RadioGroupItem value="na" id={`${question.id}-na`} />
                    <Label htmlFor={`${question.id}-na`}>N/A</Label>
                  </div>
                </RadioGroup>
                {benchmarkAnswers[question.id] === "na" && (