# Voice Grammar Scorer — Gradio app that transcribes speech with Whisper,
# scores grammatical acceptability with a CoLA classifier, and suggests a
# corrected sentence with a T5 grammar-correction model.
import gradio as gr
import torch
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
# --- Model setup (runs at import time; first run downloads the weights) ---

# Whisper large-v3 for speech-to-text.
# NOTE(review): no device/dtype is specified, so the pipeline defaults to
# CPU/float32 — confirm a GPU is selected elsewhere if latency matters.
asr_pipeline = pipeline("automatic-speech-recognition", model="openai/whisper-large-v3")
# RoBERTa fine-tuned on CoLA for grammatical-acceptability classification;
# model and tokenizer are loaded explicitly, then wrapped in a pipeline.
cola_model = AutoModelForSequenceClassification.from_pretrained("textattack/roberta-base-CoLA")
cola_tokenizer = AutoTokenizer.from_pretrained("textattack/roberta-base-CoLA")
grammar_pipeline = pipeline("text-classification", model=cola_model, tokenizer=cola_tokenizer)
# T5 fine-tuned for grammar correction (seq2seq text generation).
correction_pipeline = pipeline("text2text-generation", model="vennify/t5-base-grammar-correction")
def process_audio(audio_path):
    """Transcribe an audio file, score its grammar, and suggest a correction.

    Args:
        audio_path: Filesystem path to an audio file, as supplied by
            ``gr.Audio(type="filepath")``.

    Returns:
        A 3-tuple of (transcription, "LABEL (confidence)", corrected_text).
    """
    # Speech -> text.
    transcription = asr_pipeline(audio_path)["text"]

    # Acceptability score from the CoLA classifier. truncation=True guards
    # against transcriptions longer than the model's 512-token limit, which
    # would otherwise raise a tensor-size error instead of returning a score.
    grammar_result = grammar_pipeline(transcription, truncation=True)[0]
    score_label = grammar_result["label"]
    score_confidence = grammar_result["score"]

    # vennify/t5-base-grammar-correction was fine-tuned with a "grammar: "
    # task prefix (per its model card); without it the T5 output degrades.
    corrected_text = correction_pipeline(
        "grammar: " + transcription, max_length=128
    )[0]["generated_text"]

    return transcription, f"{score_label} ({score_confidence:.2f})", corrected_text
# Gradio Interface
# --- Gradio UI ---
# NOTE(review): in Gradio 3.x, source="microphone" restricts input to mic
# recording only (the original comment claiming it also enables upload is
# wrong); allowing both requires Gradio 4's sources=["microphone", "upload"].
# Confirm the installed Gradio version before changing this.
interface = gr.Interface(
    fn=process_audio,
    inputs=gr.Audio(
        source="microphone",
        type="filepath",
        label="🎤 Record or Upload Audio (.wav)",
    ),
    outputs=[
        gr.Textbox(label="📝 Transcription"),
        gr.Textbox(label="✅ Grammar Score"),
        gr.Textbox(label="✏️ Suggested Correction"),
    ],
    title="🎙️ Voice Grammar Scorer",
    description="Record or upload your voice (.wav). This app transcribes it, scores grammar, and suggests corrections.",
)

if __name__ == "__main__":
    # Launch the local Gradio server (blocking call).
    interface.launch()