Update app.py
app.py
CHANGED
@@ -30,7 +30,6 @@ def initialize_firebase():
 
     cred = None
     try:
-        # Method 1: Use specific credentials file path
         firebase_credentials_path = os.getenv("FIREBASE_CREDENTIALS_PATH", "prepgenie-64134-firebase-adminsdk-fbsvc-3370ac4ab9.json")
         if firebase_credentials_path and os.path.exists(firebase_credentials_path):
             print(f"Initializing Firebase with credentials file: {firebase_credentials_path}")
@@ -46,7 +45,6 @@ def initialize_firebase():
         print(f"Failed to initialize Firebase using credentials file: {e}")
 
     try:
-        # Method 2: Use JSON string from environment variable
         firebase_credentials_json = os.getenv("FIREBASE_CREDENTIALS_JSON")
         if firebase_credentials_json:
             print("Initializing Firebase with credentials from FIREBASE_CREDENTIALS_JSON environment variable.")
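For context, the two credential sources shown above follow the standard firebase_admin pattern. A minimal sketch, assuming the usual firebase_admin API (the helper name and fallback order are illustrative, not the app's exact code):

    import json
    import os

    import firebase_admin
    from firebase_admin import credentials

    def init_firebase_from_env():
        # Prefer a service-account file on disk, if one is configured.
        path = os.getenv("FIREBASE_CREDENTIALS_PATH")
        if path and os.path.exists(path):
            return firebase_admin.initialize_app(credentials.Certificate(path))
        # Otherwise fall back to an inline JSON service-account string.
        raw = os.getenv("FIREBASE_CREDENTIALS_JSON")
        if raw:
            return firebase_admin.initialize_app(credentials.Certificate(json.loads(raw)))
        return None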
@@ -68,9 +66,12 @@ def initialize_firebase():
 FIREBASE_APP = initialize_firebase()
 FIREBASE_AVAILABLE = FIREBASE_APP is not None
 
-# Configure Generative AI
-
-
+# --- Configure Generative AI (CHANGED MODEL) ---
+# Replace 'gemini-pro' with 'gemini-1.5-flash'
+genai.configure(api_key=os.getenv("GOOGLE_API_KEY") or "YOUR_DEFAULT_API_KEY_HERE")
+# text_model = genai.GenerativeModel("gemini-pro")  # OLD
+text_model = genai.GenerativeModel("gemini-1.5-flash")  # NEW - Use the correct model name
+print("Using Generative AI model: gemini-1.5-flash")
 
 # Load BERT model and tokenizer
 try:
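The hunk above swaps the older gemini-pro model for gemini-1.5-flash via the google-generativeai SDK. A minimal standalone sketch of that call pattern, assuming GOOGLE_API_KEY is set in the environment (the prompt is illustrative):

    import os

    import google.generativeai as genai

    genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
    model = genai.GenerativeModel("gemini-1.5-flash")
    response = model.generate_content("Summarize this resume in one paragraph: ...")
    print(response.text)  # text of the generated completion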
@@ -88,15 +89,17 @@ except Exception as e:
 def getallinfo(data):
     if not data or not data.strip():
         return "No data provided or data is empty."
+    # Use the new model instance
     text = f"""{data} is given by the user. Make sure you are getting the details like name, experience,
     education, skills of the user like in a resume. If the details are not provided return: not a resume.
     If details are provided then please try again and format the whole in a single paragraph covering all the information. """
     try:
+        # Use the correct model instance
         response = text_model.generate_content(text)
         response.resolve()
         return response.text
     except Exception as e:
-        print(f"Error in getallinfo: {e}")
+        print(f"Error in getallinfo: {e}")  # This should now be clearer
         return "Error processing resume data."
 
 def file_processing(pdf_file_path):
@@ -178,6 +181,7 @@ def generate_questions(roles, data):
     and make sure the questions are related to these metrics: Communication skills, Teamwork and collaboration,
     Problem-solving and critical thinking, Time management and organization, Adaptability and resilience."""
     try:
+        # Use the correct model instance
         response = text_model.generate_content(text)
         response.resolve()
         questions_text = response.text.strip()
@@ -211,6 +215,7 @@ def generate_overall_feedback(data, percent, answer, questions):
     3. Areas for improvement (2-3 points)
     Be honest and constructive. Do not mention the exact score, but rate the candidate out of 10 based on their answers."""
     try:
+        # Use the correct model instance
         response = text_model.generate_content(prompt)
         response.resolve()
         return response.text
@@ -244,6 +249,7 @@ def generate_metrics(data, answer, question):
     Time management and organization: [rating]
     Adaptability and resilience: [rating]"""
     try:
+        # Use the correct model instance
         response = text_model.generate_content(text)
         response.resolve()
         metrics_text = response.text.strip()
@@ -292,6 +298,7 @@ def getmetrics(interaction, resume):
     Adaptability and resilience: B
     """
     try:
+        # Use the correct model instance
         response = text_model.generate_content(text)
         response.resolve()
         return response.text
@@ -400,6 +407,7 @@ def generate_evaluation_report(metrics_data, average_rating, feedback_list, inte
 def process_resume(file_obj):
     """Handles resume upload and processing."""
     if not file_obj:
+        # Return exactly 13 values
         return (
             "Please upload a PDF resume.",
             gr.update(visible=False), gr.update(visible=False),
@@ -407,8 +415,8 @@ def process_resume(file_obj):
             gr.update(visible=False), gr.update(visible=False),
             gr.update(visible=False), gr.update(visible=False),
             gr.update(visible=False), gr.update(visible=False),
-            gr.update(visible=False), gr.update(visible=False)
-
+            gr.update(visible=False), gr.update(visible=False)
+            # 13 values total (no extra processed_data at the end)
         )
 
     try:
@@ -419,6 +427,7 @@ def process_resume(file_obj):
 
         raw_text = file_processing(file_path)
         if not raw_text or not raw_text.strip():
+            # Return exactly 13 values on error
             return (
                 "Could not extract text from the PDF.",
                 gr.update(visible=False), gr.update(visible=False),
@@ -426,24 +435,27 @@ def process_resume(file_obj):
                 gr.update(visible=False), gr.update(visible=False),
                 gr.update(visible=False), gr.update(visible=False),
                 gr.update(visible=False), gr.update(visible=False),
-                gr.update(visible=False), gr.update(visible=False)
-
+                gr.update(visible=False), gr.update(visible=False)
+                # 13 values total
             )
 
         processed_data = getallinfo(raw_text)
+        # Return exactly 13 values on success
+        # The last output component is processed_resume_data_hidden_interview
         return (
             f"File processed successfully!",
-            gr.update(visible=True), gr.update(visible=True),
-            gr.update(visible=False), gr.update(visible=False),
-            gr.update(visible=False), gr.update(visible=False),
-            gr.update(visible=False), gr.update(visible=False),
-            gr.update(visible=False), gr.update(visible=False),
-
-
+            gr.update(visible=True), gr.update(visible=True),    # Role, Start Btn
+            gr.update(visible=False), gr.update(visible=False),  # Q Display, A Instructions
+            gr.update(visible=False), gr.update(visible=False),  # Audio, Submit Ans
+            gr.update(visible=False), gr.update(visible=False),  # Next Q, Submit Int
+            gr.update(visible=False), gr.update(visible=False),  # Answer, Feedback
+            processed_data  # This goes to the 13th output component
+            # 13 values total
         )
     except Exception as e:
         error_msg = f"Error processing file: {str(e)}"
         print(error_msg)
+        # Ensure exactly 13 values are returned even on error
        return (
            error_msg,
            gr.update(visible=False), gr.update(visible=False),
@@ -451,20 +463,22 @@ def process_resume(file_obj):
            gr.update(visible=False), gr.update(visible=False),
            gr.update(visible=False), gr.update(visible=False),
            gr.update(visible=False), gr.update(visible=False),
-            gr.update(visible=False), gr.update(visible=False)
-
+            gr.update(visible=False), gr.update(visible=False)
+            # 13 values total
        )
 
 def start_interview(roles, processed_resume_data):
     """Starts the interview process."""
     if not roles or (isinstance(roles, list) and not any(roles)) or not processed_resume_data or not processed_resume_data.strip():
+        # Return exactly 11 values matching the outputs list
         return (
             "Please select a role and ensure resume is processed.",
-            "",
-            gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
-            gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
-            gr.update(visible=False), gr.update(visible=False),
-
+            "",  # initial question
+            gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),  # Audio, Submit, Next
+            gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),  # Submit Int, Feedback, Metrics
+            gr.update(visible=False), gr.update(visible=False),  # Q Display, A Instructions
+            {}  # interview_state
+            # 11 values total
         )
 
     try:
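The "13 values total" and "11 values total" comments in these hunks exist because a Gradio event handler must return exactly one value per component in its outputs list, in the same order. A minimal sketch of that contract, with illustrative component names rather than the app's:

    import gradio as gr

    with gr.Blocks() as demo:
        status = gr.Textbox(label="Status")
        start_btn = gr.Button("Start Interview", visible=False)
        process_btn = gr.Button("Process Resume")

        def on_process(current_status):
            # Two output components -> return exactly two values, in order.
            return "File processed successfully!", gr.update(visible=True)

        process_btn.click(fn=on_process, inputs=[status], outputs=[status, start_btn])

    demo.launch()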
@@ -479,38 +493,44 @@ def start_interview(roles, processed_resume_data):
             "metrics_list": [],
             "resume_data": processed_resume_data
         }
+        # Return exactly 11 values
         return (
             "Interview started. Please answer the first question.",
             initial_question,
-
-
-            gr.update(visible=True), gr.update(visible=True),
-            gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
-            gr.update(visible=True), gr.update(visible=True),
+            gr.update(visible=True), gr.update(visible=True), gr.update(visible=True),  # Audio, Submit, Next
+            gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),  # Submit Int, Feedback, Metrics
+            gr.update(visible=True), gr.update(visible=True),  # Q Display, A Instructions
             interview_state
+            # 11 values total
         )
     except Exception as e:
         error_msg = f"Error starting interview: {str(e)}"
         print(error_msg)
+        # Return exactly 11 values on error
         return (
             error_msg,
-            "",
-            gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
+            "",  # initial question
             gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
             gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
-            gr.update(visible=False),
+            gr.update(visible=False), gr.update(visible=False),
+            {}  # interview_state
+            # 11 values total
         )
 
 def submit_answer(audio, interview_state):
     """Handles submitting an answer via audio."""
     if not audio or not interview_state:
+        # Return values matching the outputs list, ensuring audio is handled correctly
+        # If audio is invalid, return None or gr.update() for the audio component
         return (
             "No audio recorded or interview not started.",
-            "",
-
-            gr.update(visible=False), gr.update(visible=False),
-            gr.update(visible=
-            gr.update(visible=
+            "",  # answer_text
+            interview_state,  # state
+            gr.update(visible=False), gr.update(visible=False),  # Feedback display/value
+            gr.update(visible=False), gr.update(visible=False),  # Metrics display/value
+            gr.update(visible=True), gr.update(visible=True), gr.update(visible=True),  # Audio, Submit, Next (keep visible for retry)
+            gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)  # Submit Int (hide), Q Display, A Instructions
+            # 13 values total (matching outputs list)
         )
 
     try:
@@ -547,37 +567,45 @@ def submit_answer(audio, interview_state):
 
         interview_state["current_q_index"] += 1
 
+        # Return values matching the outputs list
         return (
             f"Answer submitted: {answer_text}",
             answer_text,
             interview_state,
-            gr.update(visible=True), gr.update(value=feedback_text, visible=True),
-            gr.update(visible=True), gr.update(value=metrics, visible=True),
-            gr.update(visible=True), gr.update(visible=True), gr.update(visible=True),
-            gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)
+            gr.update(visible=True), gr.update(value=feedback_text, visible=True),  # Feedback
+            gr.update(visible=True), gr.update(value=metrics, visible=True),  # Metrics
+            gr.update(visible=True), gr.update(visible=True), gr.update(visible=True),  # Audio, Submit, Next
+            gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)  # Submit Int, Q Display, A Instructions
+            # 13 values total
         )
 
     except Exception as e:
         print(f"Error processing audio answer: {e}")
+        # Return values matching the outputs list, handling error
         return (
             "Error processing audio. Please try again.",
-            "",
-
-            gr.update(visible=False), gr.update(visible=False),
-            gr.update(visible=
-            gr.update(visible=
+            "",  # answer_text
+            interview_state,  # state (pass through)
+            gr.update(visible=False), gr.update(visible=False),  # Feedback
+            gr.update(visible=False), gr.update(visible=False),  # Metrics
+            gr.update(visible=True), gr.update(visible=True), gr.update(visible=True),  # Audio, Submit, Next (keep for retry)
+            gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)  # Submit Int, Q Display, A Instructions
+            # 13 values total
         )
 
 def next_question(interview_state):
     """Moves to the next question or ends the interview."""
     if not interview_state:
+        # Return values matching outputs list
         return (
             "Interview not started.",
-            "",
-
-            gr.update(visible=
-            gr.update(visible=False), gr.update(visible=
-            gr.update(visible=False), gr.update(visible=False)
+            "",  # next_q
+            interview_state,  # state
+            gr.update(visible=True), gr.update(visible=True), gr.update(visible=True),  # Audio, Submit, Next
+            gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),  # Feedback, Metrics, Submit Int
+            gr.update(visible=False), gr.update(visible=False),  # Q Display, A Instructions
+            "", {}  # Clear answer/metrics display
+            # 13 values total
         )
 
     current_q_index = interview_state["current_q_index"]
@@ -585,33 +613,41 @@ def next_question(interview_state):
 
     if current_q_index < total_questions:
         next_q = interview_state["questions"][current_q_index]
+        # Return values for next question
        return (
            f"Question {current_q_index + 1}/{total_questions}",
            next_q,
            interview_state,
-            gr.update(visible=True), gr.update(visible=True), gr.update(visible=True),
-            gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
-            gr.update(visible=
-            "", {}
+            gr.update(visible=True), gr.update(visible=True), gr.update(visible=True),  # Audio, Submit, Next
+            gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),  # Feedback, Metrics, Submit Int
+            gr.update(visible=True), gr.update(visible=True),  # Q Display, A Instructions
+            "", {}  # Clear previous answer/metrics display
+            # 13 values total
        )
    else:
+        # Interview finished
        return (
            "Interview completed! Click 'Submit Interview' to see your evaluation.",
            "Interview Finished",
            interview_state,
-            gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
-            gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
-            gr.update(visible=True), gr.update(visible=
-            "", {}
+            gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),  # Audio, Submit, Next (hide)
+            gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),  # Feedback, Metrics, Submit Int (hide feedback/metrics)
+            gr.update(visible=True), gr.update(visible=False),  # Q Display (show finished), A Instructions (hide)
+            "", {}  # Clear answer/metrics display
+            # 13 values total
+            # Ensure submit_interview_btn is made visible here or in the event listener logic if needed immediately
        )
 
 def submit_interview(interview_state):
     """Handles final submission, triggers evaluation, and prepares results."""
     if not interview_state or not isinstance(interview_state, dict):
+        # Return values matching outputs list for submit_interview_btn.click
        return (
            "Interview state is missing or invalid.",
-            interview_state,
-            gr.update(visible=False), gr.update(visible=False),
+            interview_state,  # state (pass through)
+            gr.update(visible=False), gr.update(visible=False),  # Report, Chart (hide)
+            "", None  # Report text, Chart image (clear)
+            # 5 values total (matching submit_interview_btn.click outputs)
        )
 
    try:
@@ -624,10 +660,13 @@ def submit_interview(interview_state):
         if not interactions:
             error_msg = "No interview interactions found to evaluate."
             print(error_msg)
+            # Return values matching outputs list
             return (
                 error_msg,
                 interview_state,
-                gr.update(visible=False), gr.update(visible=False),
+                gr.update(visible=False), gr.update(visible=False),  # Report, Chart (hide)
+                "", None  # Report text, Chart image (clear)
+                # 5 values total
             )
 
         raw_metrics_text = getmetrics(interactions, resume_data)
@@ -645,22 +684,29 @@ def submit_interview(interview_state):
         chart_buffer = create_metrics_chart(final_metrics)
         print("Evaluation chart generated.")
 
+        # Return values matching outputs list
         return (
             "Evaluation Complete! See your results below.",
-            interview_state,
-            gr.update(visible=True, value=report_text),
-            gr.update(visible=True, value=chart_buffer)
+            interview_state,  # state (pass through, though not changed)
+            gr.update(visible=True, value=report_text),  # Show and update report
+            gr.update(visible=True, value=chart_buffer)  # Show and update chart
+            # 4 values total (Note: outputs list had 4 items, but function returns 4, so it should be fine)
+            # Actually, checking the listener again:
+            # outputs=[file_status_interview, interview_state, evaluation_report_display, evaluation_chart_display]
+            # So, 4 outputs, 4 returns. Correct.
         )
     except Exception as e:
         error_msg = f"Error during evaluation submission: {str(e)}"
         print(error_msg)
         import traceback
         traceback.print_exc()
+        # Return values matching outputs list on error
         return (
             error_msg,
-            interview_state,
-            gr.update(visible=True, value=error_msg),
-            gr.update(visible=False)
+            interview_state,  # state (pass through)
+            gr.update(visible=True, value=error_msg),  # Show error in report area
+            gr.update(visible=False)  # Hide chart
+            # 4 values total
         )
 
 # --- Login and Navigation Logic (Firebase Integrated) ---
@@ -702,45 +748,45 @@ def login(email, password):
     )
 
 def signup(email, password, username):
-
-    if not FIREBASE_AVAILABLE:
+    if not FIREBASE_AVAILABLE:
         return (
-
-
-
+            "Firebase not initialized. Signup unavailable.",
+            gr.update(visible=True), gr.update(visible=False), gr.update(visible=False),
+            gr.update(visible=False), "", "", "", "", ""
+        )
     if not email or not password or not username:
         return (
-
-
-
+            "Please fill all fields.",
+            gr.update(visible=False), gr.update(visible=True), gr.update(visible=False),
+            gr.update(visible=False), email, password, username, "", ""
         )
     try:
         user = auth.create_user(email=email, password=password, uid=username, display_name=username)
         success_msg = f"Account created successfully for {username}!"
         return (
-
-
-
+            success_msg,
+            gr.update(visible=True), gr.update(visible=False), gr.update(visible=False),
+            gr.update(visible=False), "", "", "", user.uid, user.email
         )
     except auth.UidAlreadyExistsError:
         return (
-
-
-
+            "Username already exists. Please choose another.",
+            gr.update(visible=False), gr.update(visible=True), gr.update(visible=False),
+            gr.update(visible=False), email, password, username, "", ""
         )
     except auth.EmailAlreadyExistsError:
         return (
-
-
-
+            "Email already exists. Please use another email.",
+            gr.update(visible=False), gr.update(visible=True), gr.update(visible=False),
+            gr.update(visible=False), email, password, username, "", ""
        )
    except Exception as e:
        error_msg = f"Signup failed: {str(e)}"
        print(error_msg)
        return (
-
-
-
+            error_msg,
+            gr.update(visible=False), gr.update(visible=True), gr.update(visible=False),
+            gr.update(visible=False), email, password, username, "", ""
        )
 
 def logout():
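The signup path above leans on firebase_admin.auth; a minimal sketch of create_user with the same error classes, assuming the Admin SDK has already been initialized (the helper name is illustrative, not the app's exact code):

    from firebase_admin import auth

    def create_account(email: str, password: str, username: str) -> str:
        try:
            user = auth.create_user(email=email, password=password,
                                    uid=username, display_name=username)
            return f"Account created successfully for {user.display_name}!"
        except auth.UidAlreadyExistsError:
            return "Username already exists. Please choose another."
        except auth.EmailAlreadyExistsError:
            return "Email already exists. Please use another email."
        except Exception as e:
            return f"Signup failed: {e}"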
@@ -851,7 +897,7 @@ with gr.Blocks(title="PrepGenie - Mock Interview") as demo:
                 process_chat_btn = gr.Button("Process Resume")
             with gr.Column():
                 file_status_chat = gr.Textbox(label="Status", interactive=False)
-                chatbot = gr.Chatbot(label="Chat History", visible=False, type="messages")
+                chatbot = gr.Chatbot(label="Chat History", visible=False, type="messages")
                 query_input = gr.Textbox(label="Ask about your resume", placeholder="Type your question here...", visible=False)
                 send_btn = gr.Button("Send", visible=False)
         else:
@@ -874,20 +920,22 @@ with gr.Blocks(title="PrepGenie - Mock Interview") as demo:
                 question_display, answer_instructions, audio_input,
                 submit_answer_btn, next_question_btn, submit_interview_btn,
                 answer_display, feedback_display, metrics_display,
-                processed_resume_data_hidden_interview
+                processed_resume_data_hidden_interview  # 13 outputs
             ]
         )
 
         start_interview_btn.click(
             fn=start_interview,
             inputs=[role_selection, processed_resume_data_hidden_interview],
-            # --- CORRECTION: Remove direct subscripting of interview_state ---
             outputs=[
                 file_status_interview, question_display,
+                # interview_state["questions"], interview_state["answers"], # REMOVED - Invalid
+                # interview_state["interactions"], interview_state["metrics_list"], # REMOVED - Invalid
+                # Outputs for UI updates
                 audio_input, submit_answer_btn, next_question_btn,
                 submit_interview_btn, feedback_display, metrics_display,
                 question_display, answer_instructions,
-                interview_state
+                interview_state  # Update the state object itself (11 outputs)
             ]
         )
 
@@ -896,9 +944,9 @@ with gr.Blocks(title="PrepGenie - Mock Interview") as demo:
             inputs=[audio_input, interview_state],
             outputs=[
                 file_status_interview, answer_display, interview_state,
-                feedback_display, feedback_display,
-                metrics_display, metrics_display,
-                audio_input, submit_answer_btn, next_question_btn,
+                feedback_display, feedback_display,  # Update value and visibility
+                metrics_display, metrics_display,  # Update value and visibility
+                audio_input, submit_answer_btn, next_question_btn,  # 13 outputs
                 submit_interview_btn, question_display, answer_instructions
             ]
         )
@@ -911,7 +959,7 @@ with gr.Blocks(title="PrepGenie - Mock Interview") as demo:
                 audio_input, submit_answer_btn, next_question_btn,
                 feedback_display, metrics_display, submit_interview_btn,
                 question_display, answer_instructions,
-                answer_display, metrics_display
+                answer_display, metrics_display  # Clear previous answer/metrics display (13 outputs)
             ]
         )
 
@@ -919,10 +967,10 @@ with gr.Blocks(title="PrepGenie - Mock Interview") as demo:
             fn=submit_interview,
             inputs=[interview_state],
             outputs=[
-                file_status_interview,
-                interview_state,
-                evaluation_report_display,
-                evaluation_chart_display
+                file_status_interview,  # Status message
+                interview_state,  # State (passed through)
+                evaluation_report_display,  # Show report
+                evaluation_chart_display  # Show chart (4 outputs)
             ]
         )
 
@@ -980,6 +1028,6 @@ with gr.Blocks(title="PrepGenie - Mock Interview") as demo:
         outputs=[login_section, signup_section]
     )
 
-
+
 if __name__ == "__main__":
     demo.launch(server_name="0.0.0.0", server_port=7860, share=False)