Commit 0f7edd8 (verified) · Sa-m committed · 1 parent: 359fda0

Update app.py

Files changed (1)
  1. app.py +403 -280
app.py CHANGED
@@ -4,20 +4,19 @@ import os
import tempfile
import PyPDF2
import google.generativeai as genai
- # import tensorflow as tf # Not directly used here, but models might need it
from transformers import BertTokenizer, TFBertModel
import numpy as np
import speech_recognition as sr
- # from gtts import gTTS # Not used directly in main app logic here
- # import pygame # Not used directly in main app logic here
- import time
from dotenv import load_dotenv
- import soundfile as sf # For saving audio numpy array
import json

# --- Firebase Admin SDK Setup ---
import firebase_admin
- from firebase_admin import credentials, auth, firestore

# Load environment variables
load_dotenv()
@@ -32,20 +31,22 @@ def initialize_firebase():
cred = None
try:
# Method 1: Use specific credentials file path
- firebase_credentials_path = "prepgenie-64134-firebase-adminsdk-fbsvc-3370ac4ab9.json"
- if os.path.exists(firebase_credentials_path):
print(f"Initializing Firebase with credentials file: {firebase_credentials_path}")
cred = credentials.Certificate(firebase_credentials_path)
firebase_app = firebase_admin.initialize_app(cred)
print("Firebase Admin initialized successfully using credentials file.")
return firebase_app
else:
print(f"Firebase credentials file not found at {firebase_credentials_path}")
except Exception as e:
print(f"Failed to initialize Firebase using credentials file: {e}")

try:
- # Method 2: Use JSON string from environment variable (useful for cloud deployments)
firebase_credentials_json = os.getenv("FIREBASE_CREDENTIALS_JSON")
if firebase_credentials_json:
print("Initializing Firebase with credentials from FIREBASE_CREDENTIALS_JSON environment variable.")
@@ -62,17 +63,16 @@ def initialize_firebase():
print(f"Failed to initialize Firebase using FIREBASE_CREDENTIALS_JSON: {e}")

print("Warning: Firebase Admin SDK could not be initialized. Authentication features will not work.")
- return None # Indicate failure

- # --- Initialize Firebase when the module is loaded ---
FIREBASE_APP = initialize_firebase()
FIREBASE_AVAILABLE = FIREBASE_APP is not None

# Configure Generative AI
- genai.configure(api_key=os.getenv("GOOGLE_API_KEY")) # Use environment variable or set a default
text_model = genai.GenerativeModel("gemini-pro")

-
try:
model = TFBertModel.from_pretrained("bert-base-uncased")
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
@@ -83,10 +83,10 @@ except Exception as e:
model = None
tokenizer = None

- # --- Helper Functions (Logic from Streamlit - Interview) ---

def getallinfo(data):
- if not data or not data.strip(): # Check for None or empty/whitespace
return "No data provided or data is empty."
text = f"""{data} is given by the user. Make sure you are getting the details like name, experience,
education, skills of the user like in a resume. If the details are not provided return: not a resume.
@@ -98,87 +98,51 @@ def getallinfo(data):
except Exception as e:
print(f"Error in getallinfo: {e}")
return "Error processing resume data."
- def file_processing(pdf_file_path): # Takes file path now (CORRECTED)
"""Processes the uploaded PDF file given its path."""
- if not pdf_file_path: # Handle None or empty path
- print("No file path provided to file_processing.")
return ""
try:
- # Ensure pdf_file_path is a string path, not a NamedString object directly
- # Gradio File component passes an object with a 'name' attribute containing the path
if hasattr(pdf_file_path, 'name'):
file_path_to_use = pdf_file_path.name
else:
- # If it's already a string path (less common in recent Gradio versions for File uploads)
file_path_to_use = pdf_file_path

- print(f"Attempting to process file: {file_path_to_use}")
-
- # Open the file using the resolved path
with open(file_path_to_use, "rb") as f:
reader = PyPDF2.PdfReader(f)
text = ""
for page in reader.pages:
text += page.extract_text()
return text
- except FileNotFoundError:
- print(f"File not found: {file_path_to_use}")
- return ""
- except PyPDF2.errors.PdfReadError as e:
- print(f"Error reading PDF file {file_path_to_use}: {e}")
- return ""
- except Exception as e: # Catch other potential errors
- print(f"Unexpected error processing PDF {pdf_file_path}: {e}")
- return ""
-
-
- def getallinfo(data):
- """Formats resume data."""
- if not data or not data.strip(): # Check for None or empty/whitespace
- return "No data provided or data is empty."
- # ... (rest of getallinfo logic remains the same) ...
- text = f"""{data} is given by the user. Make sure you are getting the details like name, experience,
- education, skills of the user like in a resume. If the details are not provided return: not a resume.
- If details are provided then please try again and format the whole in a single paragraph covering all the information. """
- try:
- response = text_model.generate_content(text)
- response.resolve()
- return response.text
except Exception as e:
- print(f"Error in getallinfo: {e}")
- return "Error processing resume data."
-

def get_embedding(text):
if not text or not text.strip():
- print("Empty text provided for embedding.")
- return np.zeros((1, 768)) # Return dummy embedding for empty text

if not BERT_AVAILABLE or not model or not tokenizer:
print("BERT model not available for embedding.")
- # Return a dummy embedding or handle the error appropriately
- return np.zeros((1, 768)) # Dummy embedding size for bert-base-uncased

try:
- # Add padding/truncation to handle variable lengths robustly
encoded_text = tokenizer(text, return_tensors="tf", truncation=True, padding=True, max_length=512)
output = model(encoded_text)
- embedding = output.last_hidden_state[:, 0, :] # CLS token embedding
- return embedding.numpy() # Convert to numpy for easier handling
except Exception as e:
print(f"Error getting embedding: {e}")
- return np.zeros((1, 768)) # Return dummy embedding on error

def generate_feedback(question, answer):
- # Handle empty inputs
if not question or not question.strip() or not answer or not answer.strip():
return "0.00"

try:
question_embedding = get_embedding(question)
answer_embedding = get_embedding(answer)
- # Calculate cosine similarity (ensure correct shapes)
- # np.dot expects 1D or 2D arrays. Squeeze to remove single-dimensional entries.
q_emb = np.squeeze(question_embedding)
a_emb = np.squeeze(answer_embedding)
@@ -188,18 +152,16 @@ def generate_feedback(question, answer):
similarity_score = 0.0
else:
similarity_score = dot_product / norms
- return f"{similarity_score:.2f}" # Format as string with 2 decimal places
except Exception as e:
print(f"Error generating feedback: {e}")
return "0.00"
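Reviewer note: the lines that actually compute dot_product and norms sit above this hunk and are not part of the excerpt. For reference, a minimal cosine-similarity sketch of what generate_feedback appears to be doing; only the names q_emb, a_emb, dot_product, norms and the "{:.2f}" formatting are taken from the visible context, the rest is assumed:

    import numpy as np

    def cosine_similarity_score(q_emb: np.ndarray, a_emb: np.ndarray) -> str:
        # Flatten the (1, 768) BERT embeddings to 1-D vectors.
        q = np.squeeze(q_emb)
        a = np.squeeze(a_emb)
        dot_product = np.dot(q, a)
        norms = np.linalg.norm(q) * np.linalg.norm(a)
        # Guard against zero-length vectors (e.g. the dummy np.zeros embeddings).
        similarity_score = 0.0 if norms == 0 else dot_product / norms
        return f"{similarity_score:.2f}"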
 
def generate_questions(roles, data):
- # Handle empty inputs
if not roles or (isinstance(roles, list) and not any(roles)) or not data or not data.strip():
return ["Could you please introduce yourself based on your resume?"]

questions = []
- # Ensure roles is a list and join if needed
if isinstance(roles, list):
roles_str = ", ".join(roles)
else:
@@ -214,20 +176,16 @@ def generate_questions(roles, data):
ask 2 questions only. directly ask the questions not anything else.
Also ask the questions in a polite way. Ask the questions in a way that the candidate can understand the question.
and make sure the questions are related to these metrics: Communication skills, Teamwork and collaboration,
- Problem-solving and critical thinking, Time management and organization, Adaptability and resilience. dont
- tell anything else just give me the questions. if there is a limit in no of questions, ask or try questions that covers
- all need."""
try:
response = text_model.generate_content(text)
response.resolve()
questions_text = response.text.strip()
- # Split by newline, question mark, or period. Filter out empty strings.
questions = [q.strip() for q in questions_text.split('\n') if q.strip()]
if not questions:
questions = [q.strip() for q in questions_text.split('?') if q.strip()]
if not questions:
questions = [q.strip() for q in questions_text.split('.') if q.strip()]
- # Ensure we only get up to 2 questions
questions = questions[:2] if questions else ["Could you please introduce yourself based on your resume?"]
except Exception as e:
print(f"Error generating questions: {e}")
@@ -235,11 +193,9 @@
return questions

def generate_overall_feedback(data, percent, answer, questions):
- # Handle empty inputs
if not data or not data.strip() or not answer or not answer.strip() or not questions:
return "Unable to generate feedback due to missing information."

- # Ensure percent is a string for formatting, handle potential float input
if isinstance(percent, float):
percent_str = f"{percent:.2f}"
else:
@@ -263,9 +219,7 @@ def generate_overall_feedback(data, percent, answer, questions):
return "Feedback could not be generated."

def generate_metrics(data, answer, question):
- # Handle empty inputs
if not data or not data.strip() or not answer or not answer.strip() or not question or not question.strip():
- # Return default 0 metrics for empty inputs
return {
"Communication skills": 0.0, "Teamwork and collaboration": 0.0,
"Problem-solving and critical thinking": 0.0, "Time management and organization": 0.0,
@@ -293,20 +247,16 @@ def generate_metrics(data, answer, question):
response = text_model.generate_content(text)
response.resolve()
metrics_text = response.text.strip()
- # Parse the metrics text
for line in metrics_text.split('\n'):
if ':' in line:
key, value_str = line.split(':', 1)
key = key.strip()
try:
- # Handle potential extra characters after the number
- value_clean = value_str.strip().split()[0] # Take first token
value = float(value_clean)
metrics[key] = value
except (ValueError, IndexError):
- # If parsing fails, set to 0
metrics[key] = 0.0
- # Ensure all expected metrics are present
expected_metrics = [
"Communication skills", "Teamwork and collaboration",
"Problem-solving and critical thinking", "Time management and organization",
@@ -315,10 +265,8 @@
for m in expected_metrics:
if m not in metrics:
metrics[m] = 0.0
-
except Exception as e:
print(f"Error generating metrics: {e}")
- # Return default 0 metrics on error
metrics = {
"Communication skills": 0.0, "Teamwork and collaboration": 0.0,
"Problem-solving and critical thinking": 0.0, "Time management and organization": 0.0,
@@ -326,9 +274,131 @@
}
return metrics

def process_resume(file_obj):
"""Handles resume upload and processing."""
- print(f"Received file object in process_resume: {file_obj}")
if not file_obj:
return (
"Please upload a PDF resume.",
@@ -338,17 +408,14 @@ def process_resume(file_obj):
338
  gr.update(visible=False), gr.update(visible=False),
339
  gr.update(visible=False), gr.update(visible=False),
340
  gr.update(visible=False), gr.update(visible=False),
341
- gr.update(visible=False) # 13 values
342
  )
343
 
344
  try:
345
- # Use the file path correctly
346
  if hasattr(file_obj, 'name'):
347
  file_path = file_obj.name
348
  else:
349
- file_path = str(file_obj) # Ensure it's a string
350
-
351
- print(f"Processing file at path: {file_path}")
352
 
353
  raw_text = file_processing(file_path)
354
  if not raw_text or not raw_text.strip():
@@ -360,25 +427,19 @@ def process_resume(file_obj):
360
  gr.update(visible=False), gr.update(visible=False),
361
  gr.update(visible=False), gr.update(visible=False),
362
  gr.update(visible=False), gr.update(visible=False),
363
- gr.update(visible=False) # 13 values
364
  )
365
 
366
  processed_data = getallinfo(raw_text)
367
-
368
  return (
369
  f"File processed successfully!",
370
- gr.update(visible=True), # role_selection
371
- gr.update(visible=True), # start_interview_btn
372
- gr.update(visible=False), # question_display
373
- gr.update(visible=False), # answer_instructions
374
- gr.update(visible=False), # audio_input
375
- gr.update(visible=False), # submit_answer_btn
376
- gr.update(visible=False), # next_question_btn
377
- gr.update(visible=False), # submit_interview_btn
378
- gr.update(visible=False), # answer_display
379
- gr.update(visible=False), # feedback_display
380
- gr.update(visible=False), # metrics_display
381
- processed_data # processed_resume_data_hidden_interview (13th value)
382
  )
383
  except Exception as e:
384
  error_msg = f"Error processing file: {str(e)}"
@@ -391,46 +452,87 @@ def process_resume(file_obj):
391
  gr.update(visible=False), gr.update(visible=False),
392
  gr.update(visible=False), gr.update(visible=False),
393
  gr.update(visible=False), gr.update(visible=False),
394
- gr.update(visible=False) # 13 values
 
395
  )
396
 
397
 
398
  def submit_answer(audio, interview_state):
399
  """Handles submitting an answer via audio."""
400
  if not audio or not interview_state:
401
- return ("No audio recorded or interview not started.", "", interview_state,
402
- gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
403
- gr.update(visible=False), gr.update(visible=True), gr.update(visible=True),
404
- gr.update(visible=True), gr.update(visible=False), gr.update(visible=True),
405
- gr.update(visible=True))
 
 
 
406
 
407
  try:
408
- # Save audio to a temporary file
409
  temp_dir = tempfile.mkdtemp()
410
  audio_file_path = os.path.join(temp_dir, "recorded_audio.wav")
411
- # audio is a tuple (sample_rate, numpy_array)
412
  sample_rate, audio_data = audio
413
- # Use soundfile to save the numpy array as a WAV file
414
  sf.write(audio_file_path, audio_data, sample_rate)
415
 
416
- # Convert audio file to text
417
  r = sr.Recognizer()
418
  with sr.AudioFile(audio_file_path) as source:
419
  audio_data_sr = r.record(source)
420
  answer_text = r.recognize_google(audio_data_sr)
421
  print(f"Recognized Answer: {answer_text}")
422
 
423
- # Clean up temporary audio file
424
  os.remove(audio_file_path)
425
  os.rmdir(temp_dir)
426
 
427
- # Update state with the answer
428
  interview_state["answers"].append(answer_text)
429
  current_q_index = interview_state["current_q_index"]
430
  current_question = interview_state["questions"][current_q_index]
431
  interview_state["interactions"][f"Q{current_q_index + 1}: {current_question}"] = f"A{current_q_index + 1}: {answer_text}"
432
 
433
- # Generate feedback and metrics for the current question
434
  percent_str = generate_feedback(current_question, answer_text)
435
  try:
436
  percent = float(percent_str)
@@ -441,43 +543,42 @@ def submit_answer(audio, interview_state):
441
  interview_state["feedback"].append(feedback_text)
442
 
443
  metrics = generate_metrics(interview_state["resume_data"], answer_text, current_question)
444
- interview_state["metrics_list"].append(metrics) # Store metrics for this question
445
 
446
- # Update state index
447
  interview_state["current_q_index"] += 1
448
 
449
  return (
450
  f"Answer submitted: {answer_text}",
451
  answer_text,
452
  interview_state,
453
- gr.update(visible=True), # Show feedback textbox
454
- gr.update(value=feedback_text, visible=True), # Update feedback textbox
455
- gr.update(visible=True), # Show metrics display
456
- gr.update(value=metrics, visible=True), # Update metrics display
457
- gr.update(visible=True), # Keep audio input visible for next question
458
- gr.update(visible=True), # Keep submit answer button
459
- gr.update(visible=True), # Keep next question button
460
- gr.update(visible=False), # Submit interview button still hidden
461
- gr.update(visible=True), # Question display
462
- gr.update(visible=True) # Answer instructions
463
  )
464
 
465
  except Exception as e:
466
  print(f"Error processing audio answer: {e}")
467
- return ("Error processing audio. Please try again.", "", interview_state,
468
- gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
469
- gr.update(visible=False), gr.update(visible=True), gr.update(visible=True),
470
- gr.update(visible=True), gr.update(visible=False), gr.update(visible=True),
471
- gr.update(visible=True))
 
 
 
472
 
473
  def next_question(interview_state):
474
  """Moves to the next question or ends the interview."""
475
  if not interview_state:
476
- return ("Interview not started.", "", interview_state, gr.update(visible=True),
477
- gr.update(visible=True), gr.update(visible=True), gr.update(visible=False),
478
- gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
479
- gr.update(visible=False), gr.update(visible=True), gr.update(visible=True),
480
- gr.update(visible=False), gr.update(visible=False))
 
 
 
481
 
482
  current_q_index = interview_state["current_q_index"]
483
  total_questions = len(interview_state["questions"])
@@ -488,123 +589,174 @@ def next_question(interview_state):
488
  f"Question {current_q_index + 1}/{total_questions}",
489
  next_q,
490
  interview_state,
491
- gr.update(visible=True), # Audio input
492
- gr.update(visible=True), # Submit Answer
493
- gr.update(visible=True), # Next Question
494
- gr.update(visible=False), # Feedback textbox (hidden for new question)
495
- gr.update(visible=False), # Metrics display (hidden for new question)
496
- gr.update(visible=False), # Submit Interview (still hidden)
497
- gr.update(visible=True), # Question display
498
- gr.update(visible=True), # Answer instructions
499
- "", # Clear previous answer display
500
- {} # Clear previous metrics display
501
  )
502
  else:
503
- # Interview finished
504
  return (
505
  "Interview completed! Click 'Submit Interview' to see your evaluation.",
506
  "Interview Finished",
507
  interview_state,
508
- gr.update(visible=False), # Hide audio input
509
- gr.update(visible=False), # Hide submit answer
510
- gr.update(visible=False), # Hide next question
511
- gr.update(visible=False), # Hide feedback textbox
512
- gr.update(visible=False), # Hide metrics display
513
- gr.update(visible=True), # Show submit interview button
514
- gr.update(visible=True), # Question display (shows finished)
515
- gr.update(visible=False), # Hide answer instructions
516
- "", # Clear answer display
517
- {} # Clear metrics display
518
  )
519
 
520
  def submit_interview(interview_state):
521
- """Handles final submission and triggers evaluation."""
522
- if not interview_state:
523
- return "Interview state is missing.", interview_state
 
 
 
 
524
 
525
- # The evaluation logic would typically be triggered here or handled in a separate function.
526
- # For now, we'll just indicate it's ready.
527
- print("Interview submitted for evaluation.")
528
- print("Final State:", interview_state)
529
- # In a full implementation, you might call an evaluation function here
530
- # or redirect to an evaluation page/component.
531
 
532
- return "Interview submitted successfully!", interview_state
 
533
 
534
  # --- Login and Navigation Logic (Firebase Integrated) ---
535
 
536
  def login(email, password):
537
- # Check if Firebase is available
538
  if not FIREBASE_AVAILABLE:
539
- return ("Firebase not initialized. Login unavailable.", gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), email, password, "", "")
540
-
 
 
 
541
  if not email or not password:
542
- return ("Please enter email and password.", gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), email, password, "", "")
543
-
 
 
 
544
  try:
545
- # Attempt to get user by email (checks existence)
546
- # Note: This does NOT verify the password in a secure way for a client-side app.
547
- # A production app needs server-side verification or ID token validation.
548
  user = auth.get_user_by_email(email)
549
- welcome_msg = f"Welcome, {user.display_name or user.uid}!" # Use display name or UID
550
- # Show main app, hide login/signup
551
- return (welcome_msg,
552
- gr.update(visible=False), # login_section
553
- gr.update(visible=False), # signup_section
554
- gr.update(visible=True), # main_app
555
- "", "", # Clear email/password inputs
556
- user.uid, # Update user_state with UID
557
- user.email) # Update user_email_state
558
  except auth.UserNotFoundError:
559
- return ("User not found. Please check your email or sign up.", gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), email, password, "", "")
 
 
 
 
560
  except Exception as e:
561
  error_msg = f"Login failed: {str(e)}"
562
  print(error_msg)
563
- return (error_msg, gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), email, password, "", "")
 
 
 
 
564
 
565
  def signup(email, password, username):
566
- # Check if Firebase is available
567
- if not FIREBASE_AVAILABLE:
568
- return ("Firebase not initialized. Signup unavailable.", gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), email, password, username, "", "")
 
 
 
569
  if not email or not password or not username:
570
- return ("Please fill all fields.", gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), email, password, username, "", "")
571
-
 
 
 
572
  try:
573
- # Create user in Firebase
574
  user = auth.create_user(email=email, password=password, uid=username, display_name=username)
575
  success_msg = f"Account created successfully for {username}!"
576
- # Switch to login view after successful signup
577
- return (success_msg,
578
- gr.update(visible=True), # Show login section
579
- gr.update(visible=False), # Hide signup section
580
- gr.update(visible=False), # Keep main app hidden
581
- "", "", "", # Clear email/password/username inputs
582
- user.uid, user.email) # Set user state (though they still need to login)
583
  except auth.UidAlreadyExistsError:
584
- return ("Username already exists. Please choose another.", gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), email, password, username, "", "")
 
 
 
 
585
  except auth.EmailAlreadyExistsError:
586
- return ("Email already exists. Please use another email.", gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), email, password, username, "", "")
 
 
 
 
587
  except Exception as e:
588
  error_msg = f"Signup failed: {str(e)}"
589
  print(error_msg)
590
- return (error_msg, gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), email, password, username, "", "")
 
 
 
 
591
 
592
  def logout():
593
- return ("", # Clear login status
594
- gr.update(visible=True), # Show login section
595
- gr.update(visible=False), # Hide signup section
596
- gr.update(visible=False), # Hide main app
597
- "", "", "", # Clear email/password/username inputs
598
- "", "") # Clear user_state and user_email_state
599
 
600
  def navigate_to_interview():
601
- return (gr.update(visible=True), gr.update(visible=False)) # Show interview, hide chat
602
 
603
  def navigate_to_chat():
604
- return (gr.update(visible=False), gr.update(visible=True)) # Hide interview, show chat
605
 
606
  # --- Import Chat Module Functions ---
607
- # Assuming chat.py is in the same directory or correctly in the Python path
608
  try:
609
  from login_module import chat as chat_module
610
  CHAT_MODULE_AVAILABLE = True
@@ -615,16 +767,11 @@ except ImportError as e:
615
  chat_module = None
616
 
617
  # --- Gradio Interface ---
618
-
619
  with gr.Blocks(title="PrepGenie - Mock Interview") as demo:
620
  gr.Markdown("# 🦈 PrepGenie")
621
- # State to hold interview data
622
  interview_state = gr.State({})
623
- # State for username/UID
624
  user_state = gr.State("")
625
- # State for user email
626
  user_email_state = gr.State("")
627
- # State for processed resume data (used by both interview and chat)
628
  processed_resume_data_state = gr.State("")
629
 
630
  # --- Login Section ---
@@ -634,7 +781,6 @@ with gr.Blocks(title="PrepGenie - Mock Interview") as demo:
634
  login_password_input = gr.Textbox(label="Password", type="password")
635
  login_btn = gr.Button("Login")
636
  login_status = gr.Textbox(label="Login Status", interactive=False)
637
- # Switch to Signup
638
  switch_to_signup_btn = gr.Button("Don't have an account? Sign Up")
639
 
640
  # --- Signup Section ---
@@ -645,16 +791,14 @@ with gr.Blocks(title="PrepGenie - Mock Interview") as demo:
645
  signup_username_input = gr.Textbox(label="Unique Username")
646
  signup_btn = gr.Button("Create my account")
647
  signup_status = gr.Textbox(label="Signup Status", interactive=False)
648
- # Switch to Login
649
  switch_to_login_btn = gr.Button("Already have an account? Login")
650
 
651
- # --- Main App Sections (Initially Hidden) ---
652
  with gr.Column(visible=False) as main_app:
653
  with gr.Row():
654
  with gr.Column(scale=1):
655
  logout_btn = gr.Button("Logout")
656
  with gr.Column(scale=4):
657
- # Dynamic welcome message (basic approach)
658
  welcome_display = gr.Markdown("### Welcome, User!")
659
 
660
  with gr.Row():
@@ -668,7 +812,6 @@ with gr.Blocks(title="PrepGenie - Mock Interview") as demo:
668
  # --- Interview Section ---
669
  with gr.Column(visible=False) as interview_selection:
670
  gr.Markdown("## Mock Interview")
671
- # File Upload Section
672
  with gr.Row():
673
  with gr.Column():
674
  file_upload_interview = gr.File(label="Upload Resume (PDF)", file_types=[".pdf"])
@@ -676,45 +819,39 @@ with gr.Blocks(title="PrepGenie - Mock Interview") as demo:
676
  with gr.Column():
677
  file_status_interview = gr.Textbox(label="Status", interactive=False)
678
 
679
- # Role Selection (Initially hidden)
680
  role_selection = gr.Dropdown(
681
  choices=["Data Scientist", "Software Engineer", "Product Manager", "Data Analyst", "Business Analyst"],
682
- multiselect=True,
683
- label="Select Job Role(s)",
684
- visible=False
685
  )
686
  start_interview_btn = gr.Button("Start Interview", visible=False)
687
-
688
- # Interview Section (Initially hidden)
689
  question_display = gr.Textbox(label="Question", interactive=False, visible=False)
690
  answer_instructions = gr.Markdown("Click 'Record Answer' and speak your response.", visible=False)
691
  audio_input = gr.Audio(label="Record Answer", type="numpy", visible=False)
692
  submit_answer_btn = gr.Button("Submit Answer", visible=False)
693
  next_question_btn = gr.Button("Next Question", visible=False)
694
  submit_interview_btn = gr.Button("Submit Interview", visible=False, variant="primary")
695
-
696
- # Feedback and Metrics (Initially hidden)
697
  answer_display = gr.Textbox(label="Your Answer", interactive=False, visible=False)
698
  feedback_display = gr.Textbox(label="Feedback", interactive=False, visible=False)
699
  metrics_display = gr.JSON(label="Metrics", visible=False)
700
-
701
- # Hidden textbox to hold processed resume data temporarily for interview
702
  processed_resume_data_hidden_interview = gr.Textbox(visible=False)
703
 
 
 
 
 
 
 
704
  # --- Chat Section ---
705
  if CHAT_MODULE_AVAILABLE:
706
  with gr.Column(visible=False) as chat_selection:
707
  gr.Markdown("## Chat with Resume")
708
- # File Upload Section (Chat uses its own upload)
709
  with gr.Row():
710
  with gr.Column():
711
  file_upload_chat = gr.File(label="Upload Resume (PDF)", file_types=[".pdf"])
712
  process_chat_btn = gr.Button("Process Resume")
713
  with gr.Column():
714
  file_status_chat = gr.Textbox(label="Status", interactive=False)
715
-
716
- # Chat Section (Initially hidden)
717
- chatbot = gr.Chatbot(label="Chat History", visible=False)
718
  query_input = gr.Textbox(label="Ask about your resume", placeholder="Type your question here...", visible=False)
719
  send_btn = gr.Button("Send", visible=False)
720
  else:
@@ -722,69 +859,51 @@ with gr.Blocks(title="PrepGenie - Mock Interview") as demo:
722
  gr.Markdown("## Chat with Resume (Unavailable)")
723
  gr.Textbox(value="Chat module is not available.", interactive=False)
724
 
725
-
726
- # Navigation buttons
727
  interview_view = interview_selection
728
  chat_view = chat_selection
729
-
730
  interview_btn.click(fn=navigate_to_interview, inputs=None, outputs=[interview_view, chat_view])
731
  if CHAT_MODULE_AVAILABLE:
732
  chat_btn.click(fn=navigate_to_chat, inputs=None, outputs=[interview_view, chat_view])
733
- # Update welcome message when user_state changes (basic)
734
- # Note: Gradio State change listeners might not work as expected for UI updates in all cases.
735
- # An alternative is to update the welcome message in the login/logout functions directly.
736
- # user_state.change(fn=lambda user: f"### Welcome, {user}!" if user else "### Welcome, User!", inputs=[user_state], outputs=[welcome_display])
737
 
738
  # --- Event Listeners for Interview ---
739
- # Process Resume (Interview)
740
  process_btn_interview.click(
741
  fn=process_resume,
742
  inputs=[file_upload_interview],
743
  outputs=[
744
- file_status_interview, # 1
745
- role_selection, # 2
746
- start_interview_btn, # 3
747
- question_display, # 4
748
- answer_instructions, # 5
749
- audio_input, # 6
750
- submit_answer_btn, # 7
751
- next_question_btn, # 8
752
- submit_interview_btn, # 9
753
- answer_display, # 10
754
- feedback_display, # 11
755
- metrics_display, # 12
756
- processed_resume_data_hidden_interview # 13 - This is where processed_data goes
757
- ] # Exactly 13 outputs
758
  )
759
 
760
- # Start Interview
761
  start_interview_btn.click(
762
  fn=start_interview,
763
  inputs=[role_selection, processed_resume_data_hidden_interview],
764
  outputs=[
765
  file_status_interview, question_display,
766
- # Outputs for UI updates
 
767
  audio_input, submit_answer_btn, next_question_btn,
768
  submit_interview_btn, feedback_display, metrics_display,
769
- question_display, answer_instructions, # These are UI updates
770
- interview_state # Update the state object itself
771
  ]
772
  )
773
 
774
- # Submit Answer
775
  submit_answer_btn.click(
776
  fn=submit_answer,
777
  inputs=[audio_input, interview_state],
778
  outputs=[
779
  file_status_interview, answer_display, interview_state,
780
- feedback_display, feedback_display, # Update value and visibility
781
- metrics_display, metrics_display, # Update value and visibility
782
  audio_input, submit_answer_btn, next_question_btn,
783
  submit_interview_btn, question_display, answer_instructions
784
  ]
785
  )
786
 
787
- # Next Question
788
  next_question_btn.click(
789
  fn=next_question,
790
  inputs=[interview_state],
@@ -793,34 +912,34 @@ with gr.Blocks(title="PrepGenie - Mock Interview") as demo:
793
  audio_input, submit_answer_btn, next_question_btn,
794
  feedback_display, metrics_display, submit_interview_btn,
795
  question_display, answer_instructions,
796
- answer_display, metrics_display # Clear previous answer/metrics display
797
  ]
798
  )
799
 
800
- # Submit Interview (Placeholder for evaluation trigger)
801
  submit_interview_btn.click(
802
  fn=submit_interview,
803
  inputs=[interview_state],
804
- outputs=[file_status_interview, interview_state]
805
- # In a full app, you might navigate to an evaluation page here
 
 
 
 
806
  )
807
 
808
- # --- Event Listeners for Chat (if available) ---
809
  if CHAT_MODULE_AVAILABLE:
810
- # Process Resume for Chat
811
  process_chat_btn.click(
812
  fn=chat_module.process_resume_chat,
813
  inputs=[file_upload_chat],
814
  outputs=[file_status_chat, processed_resume_data_state, query_input, send_btn, chatbot]
815
  )
816
-
817
- # Chat Interaction
818
  send_btn.click(
819
  fn=chat_module.chat_with_resume,
820
- inputs=[query_input, processed_resume_data_state, chatbot], # chatbot provides history
821
- outputs=[query_input, chatbot] # Update input (clear) and chatbot (new history)
822
  )
823
- query_input.submit( # Allow submitting with Enter key
824
  fn=chat_module.chat_with_resume,
825
  inputs=[query_input, processed_resume_data_state, chatbot],
826
  outputs=[query_input, chatbot]
@@ -830,22 +949,26 @@ with gr.Blocks(title="PrepGenie - Mock Interview") as demo:
830
  login_btn.click(
831
  fn=login,
832
  inputs=[login_email_input, login_password_input],
833
- outputs=[login_status, login_section, signup_section, main_app, login_email_input, login_password_input, user_state, user_email_state]
 
834
  )
835
 
836
  signup_btn.click(
837
  fn=signup,
838
  inputs=[signup_email_input, signup_password_input, signup_username_input],
839
- outputs=[signup_status, login_section, signup_section, main_app, signup_email_input, signup_password_input, signup_username_input, user_state, user_email_state]
 
 
840
  )
841
 
842
  logout_btn.click(
843
  fn=logout,
844
  inputs=None,
845
- outputs=[login_status, login_section, signup_section, main_app, login_email_input, login_password_input, signup_username_input, user_state, user_email_state]
 
 
846
  )
847
 
848
- # Switch between Login and Signup
849
  switch_to_signup_btn.click(
850
  fn=lambda: (gr.update(visible=False), gr.update(visible=True)),
851
  inputs=None,
@@ -860,4 +983,4 @@ with gr.Blocks(title="PrepGenie - Mock Interview") as demo:
860
 
861
  # Run the app
862
  if __name__ == "__main__":
863
- demo.launch(share=True) # You can add server_name="0.0.0.0", server_port=7860 for external access
 
app.py, updated version (added lines marked with +; unchanged stretches elided as ...):

import tempfile
import PyPDF2
import google.generativeai as genai
from transformers import BertTokenizer, TFBertModel
import numpy as np
import speech_recognition as sr
from dotenv import load_dotenv
+ import soundfile as sf
import json
+ import matplotlib.pyplot as plt
+ import io
+ import re

# --- Firebase Admin SDK Setup ---
import firebase_admin
+ from firebase_admin import credentials, auth

# Load environment variables
load_dotenv()

...

cred = None
try:
# Method 1: Use specific credentials file path
+ firebase_credentials_path = os.getenv("FIREBASE_CREDENTIALS_PATH", "prepgenie-64134-firebase-adminsdk-fbsvc-3370ac4ab9.json")
+ if firebase_credentials_path and os.path.exists(firebase_credentials_path):
print(f"Initializing Firebase with credentials file: {firebase_credentials_path}")
cred = credentials.Certificate(firebase_credentials_path)
firebase_app = firebase_admin.initialize_app(cred)
print("Firebase Admin initialized successfully using credentials file.")
return firebase_app
+ elif not firebase_credentials_path:
+ print("FIREBASE_CREDENTIALS_PATH is not set or is None.")
else:
print(f"Firebase credentials file not found at {firebase_credentials_path}")
except Exception as e:
print(f"Failed to initialize Firebase using credentials file: {e}")

try:
+ # Method 2: Use JSON string from environment variable
firebase_credentials_json = os.getenv("FIREBASE_CREDENTIALS_JSON")
if firebase_credentials_json:
print("Initializing Firebase with credentials from FIREBASE_CREDENTIALS_JSON environment variable.")

...

print(f"Failed to initialize Firebase using FIREBASE_CREDENTIALS_JSON: {e}")

print("Warning: Firebase Admin SDK could not be initialized. Authentication features will not work.")
+ return None

FIREBASE_APP = initialize_firebase()
FIREBASE_AVAILABLE = FIREBASE_APP is not None
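Reviewer note: the body of "Method 2" (the lines between the FIREBASE_CREDENTIALS_JSON check and its except clause) falls outside this excerpt. A minimal sketch of how a JSON credential string is typically turned into a certificate with firebase_admin; the helper name _init_from_json_env is made up here and the real code in this commit may differ:

    import json
    import os

    import firebase_admin
    from firebase_admin import credentials

    def _init_from_json_env():
        # FIREBASE_CREDENTIALS_JSON holds the full service-account JSON as a string.
        raw = os.getenv("FIREBASE_CREDENTIALS_JSON")
        if not raw:
            return None
        cred_dict = json.loads(raw)                 # parse the JSON string into a dict
        cred = credentials.Certificate(cred_dict)   # Certificate() accepts a dict or a file path
        return firebase_admin.initialize_app(cred)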
# Configure Generative AI
+ genai.configure(api_key=os.getenv("GOOGLE_API_KEY") or "YOUR_DEFAULT_API_KEY_HERE")
text_model = genai.GenerativeModel("gemini-pro")

+ # Load BERT model and tokenizer
try:
model = TFBertModel.from_pretrained("bert-base-uncased")
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
 
83
  model = None
84
  tokenizer = None
85
 
86
+ # --- Helper Functions (Logic adapted from Streamlit) ---
87
 
88
  def getallinfo(data):
89
+ if not data or not data.strip():
90
  return "No data provided or data is empty."
91
  text = f"""{data} is given by the user. Make sure you are getting the details like name, experience,
92
  education, skills of the user like in a resume. If the details are not provided return: not a resume.
 
98
  except Exception as e:
99
  print(f"Error in getallinfo: {e}")
100
  return "Error processing resume data."
101
+
102
+ def file_processing(pdf_file_path):
103
  """Processes the uploaded PDF file given its path."""
104
+ if not pdf_file_path:
 
105
  return ""
106
  try:
 
 
107
  if hasattr(pdf_file_path, 'name'):
108
  file_path_to_use = pdf_file_path.name
109
  else:
 
110
  file_path_to_use = pdf_file_path
111
 
 
 
 
112
  with open(file_path_to_use, "rb") as f:
113
  reader = PyPDF2.PdfReader(f)
114
  text = ""
115
  for page in reader.pages:
116
  text += page.extract_text()
117
  return text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
118
  except Exception as e:
119
+ print(f"Error processing PDF {pdf_file_path}: {e}")
120
+ return ""
 
121
 
122
  def get_embedding(text):
123
  if not text or not text.strip():
124
+ return np.zeros((1, 768))
 
125
 
126
  if not BERT_AVAILABLE or not model or not tokenizer:
127
  print("BERT model not available for embedding.")
128
+ return np.zeros((1, 768))
 
129
 
130
  try:
 
131
  encoded_text = tokenizer(text, return_tensors="tf", truncation=True, padding=True, max_length=512)
132
  output = model(encoded_text)
133
+ embedding = output.last_hidden_state[:, 0, :]
134
+ return embedding.numpy()
135
  except Exception as e:
136
  print(f"Error getting embedding: {e}")
137
+ return np.zeros((1, 768))
138
 
139
  def generate_feedback(question, answer):
 
140
  if not question or not question.strip() or not answer or not answer.strip():
141
  return "0.00"
142
 
143
  try:
144
  question_embedding = get_embedding(question)
145
  answer_embedding = get_embedding(answer)
 
 
146
  q_emb = np.squeeze(question_embedding)
147
  a_emb = np.squeeze(answer_embedding)
148
 
 
152
  similarity_score = 0.0
153
  else:
154
  similarity_score = dot_product / norms
155
+ return f"{similarity_score:.2f}"
156
  except Exception as e:
157
  print(f"Error generating feedback: {e}")
158
  return "0.00"
159
 
160
  def generate_questions(roles, data):
 
161
  if not roles or (isinstance(roles, list) and not any(roles)) or not data or not data.strip():
162
  return ["Could you please introduce yourself based on your resume?"]
163
 
164
  questions = []
 
165
  if isinstance(roles, list):
166
  roles_str = ", ".join(roles)
167
  else:
 
176
  ask 2 questions only. directly ask the questions not anything else.
177
  Also ask the questions in a polite way. Ask the questions in a way that the candidate can understand the question.
178
  and make sure the questions are related to these metrics: Communication skills, Teamwork and collaboration,
179
+ Problem-solving and critical thinking, Time management and organization, Adaptability and resilience."""
 
 
180
  try:
181
  response = text_model.generate_content(text)
182
  response.resolve()
183
  questions_text = response.text.strip()
 
184
  questions = [q.strip() for q in questions_text.split('\n') if q.strip()]
185
  if not questions:
186
  questions = [q.strip() for q in questions_text.split('?') if q.strip()]
187
  if not questions:
188
  questions = [q.strip() for q in questions_text.split('.') if q.strip()]
 
189
  questions = questions[:2] if questions else ["Could you please introduce yourself based on your resume?"]
190
  except Exception as e:
191
  print(f"Error generating questions: {e}")
 
193
  return questions
194
 
195
  def generate_overall_feedback(data, percent, answer, questions):
 
196
  if not data or not data.strip() or not answer or not answer.strip() or not questions:
197
  return "Unable to generate feedback due to missing information."
198
 
 
199
  if isinstance(percent, float):
200
  percent_str = f"{percent:.2f}"
201
  else:
 
219
  return "Feedback could not be generated."
220
 
221
  def generate_metrics(data, answer, question):
 
222
  if not data or not data.strip() or not answer or not answer.strip() or not question or not question.strip():
 
223
  return {
224
  "Communication skills": 0.0, "Teamwork and collaboration": 0.0,
225
  "Problem-solving and critical thinking": 0.0, "Time management and organization": 0.0,
 
247
  response = text_model.generate_content(text)
248
  response.resolve()
249
  metrics_text = response.text.strip()
 
250
  for line in metrics_text.split('\n'):
251
  if ':' in line:
252
  key, value_str = line.split(':', 1)
253
  key = key.strip()
254
  try:
255
+ value_clean = value_str.strip().split()[0]
 
256
  value = float(value_clean)
257
  metrics[key] = value
258
  except (ValueError, IndexError):
 
259
  metrics[key] = 0.0
 
260
  expected_metrics = [
261
  "Communication skills", "Teamwork and collaboration",
262
  "Problem-solving and critical thinking", "Time management and organization",
 
265
  for m in expected_metrics:
266
  if m not in metrics:
267
  metrics[m] = 0.0
 
268
  except Exception as e:
269
  print(f"Error generating metrics: {e}")
 
270
  metrics = {
271
  "Communication skills": 0.0, "Teamwork and collaboration": 0.0,
272
  "Problem-solving and critical thinking": 0.0, "Time management and organization": 0.0,
 
274
  }
275
  return metrics
276
 
+ # --- Evaluation Logic (Adapted from login_module/evaluate.py) ---
+
+ def getmetrics(interaction, resume):
+ interaction_text = "\n".join([f"{q}: {a}" for q, a in interaction.items()])
+ text = f"""This is the user's resume: {resume}.
+ And here is the interaction of the interview: {interaction_text}.
+ Please evaluate the interview based on the interaction and the resume.
+ Rate me the following metrics on a scale of 1 to 10. 1 being the lowest and 10 being the highest.
+ Communication skills, Teamwork and collaboration, Problem-solving and critical thinking,
+ Time management and organization, Adaptability and resilience. Just give the ratings for the metrics.
+ I do not need the feedback. Just the ratings in the format:
+ Communication skills: X
+ Teamwork and collaboration: Y
+ Problem-solving and critical thinking: Z
+ Time management and organization: A
+ Adaptability and resilience: B
+ """
+ try:
+ response = text_model.generate_content(text)
+ response.resolve()
+ return response.text
+ except Exception as e:
+ print(f"Error fetching metrics from AI: {e}")
+ return ""
+
+ def parse_metrics(metric_text):
+ metrics = {
+ "Communication skills": 0,
+ "Teamwork and collaboration": 0,
+ "Problem-solving and critical thinking": 0,
+ "Time management and organization": 0,
+ "Adaptability and resilience": 0
+ }
+ if not metric_text:
+ return metrics
+ for line in metric_text.split("\n"):
+ if ":" in line:
+ key, value = line.split(":", 1)
+ key = key.strip()
+ value = value.strip()
+ if value and value not in ['N/A', 'nan'] and not value.isspace():
+ try:
+ numbers = re.findall(r'\d+\.?\d*', value)
+ if numbers:
+ metrics[key] = int(float(numbers[0]))
+ else:
+ metrics[key] = 0
+ except (ValueError, IndexError, TypeError):
+ print(f"Warning: Could not parse metric value '{value}' for '{key}'. Setting to 0.")
+ metrics[key] = 0
+ else:
+ metrics[key] = 0
+ return metrics
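Reviewer note: a quick illustration of what parse_metrics produces for a typical model reply (the sample string below is hypothetical, not from the app):

    sample_reply = """Communication skills: 7
    Teamwork and collaboration: 8/10
    Problem-solving and critical thinking: 6.5
    Time management and organization: N/A
    Adaptability and resilience: 9"""

    print(parse_metrics(sample_reply))
    # {'Communication skills': 7, 'Teamwork and collaboration': 8,
    #  'Problem-solving and critical thinking': 6,
    #  'Time management and organization': 0, 'Adaptability and resilience': 9}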
+
+ def create_metrics_chart(metrics_dict):
+ try:
+ labels = list(metrics_dict.keys())
+ sizes = list(metrics_dict.values())
+ if not any(sizes):
+ fig, ax = plt.subplots(figsize=(4, 4))
+ ax.text(0.5, 0.5, 'No Data Available', ha='center', va='center', transform=ax.transAxes)
+ ax.axis('off')
+ else:
+ fig, ax = plt.subplots(figsize=(6, 6))
+ wedges, texts, autotexts = ax.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=90)
+ ax.axis('equal')
+ for autotext in autotexts:
+ autotext.set_color('white')
+ autotext.set_fontsize(8)
+ buf = io.BytesIO()
+ plt.savefig(buf, format='png', bbox_inches='tight')
+ buf.seek(0)
+ plt.close(fig)
+ return buf
+ except Exception as e:
+ print(f"Error creating chart: {e}")
+ fig, ax = plt.subplots(figsize=(4, 4))
+ ax.text(0.5, 0.5, 'Chart Error', ha='center', va='center', transform=ax.transAxes)
+ ax.axis('off')
+ buf = io.BytesIO()
+ plt.savefig(buf, format='png')
+ buf.seek(0)
+ plt.close(fig)
+ return buf
+
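Reviewer note: create_metrics_chart returns the PNG in an io.BytesIO buffer, and later in this diff submit_interview passes that buffer straight to gr.update(value=chart_buffer). gr.Image typically expects a file path, a PIL image, or a numpy array rather than a raw buffer, so a conversion step may be needed depending on the Gradio version. A minimal sketch, assuming Pillow is available; selecting the non-interactive Agg backend is also common for headless Spaces:

    import matplotlib
    matplotlib.use("Agg")  # non-interactive backend; ideally set before pyplot is imported

    from PIL import Image

    def chart_buffer_to_image(buf):
        # Convert the BytesIO PNG from create_metrics_chart into a PIL image
        # that a gr.Image component can display.
        return Image.open(buf)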
+ def generate_evaluation_report(metrics_data, average_rating, feedback_list, interaction_dict):
+ try:
+ report_lines = [f"## Hey Candidate, here is your interview evaluation:\n"]
+ report_lines.append("### Skill Ratings:\n")
+ for metric, rating in metrics_data.items():
+ report_lines.append(f"* **{metric}:** {rating}/10\n")
+ report_lines.append(f"\n### Overall Average Rating: {average_rating:.2f}/10\n")
+ report_lines.append("### Feedback Summary:\n")
+ if feedback_list:
+ last_feedback = feedback_list[-1] if feedback_list else "No feedback available."
+ report_lines.append(last_feedback)
+ else:
+ report_lines.append("No detailed feedback was generated.")
+ report_lines.append("\n### Interview Interaction:\n")
+ if interaction_dict:
+ for q, a in interaction_dict.items():
+ report_lines.append(f"* **{q}**\n {a}\n")
+ else:
+ report_lines.append("Interaction data not available.")
+ improvement_content = """
+ ### Areas for Improvement:
+ * **Communication:** Focus on clarity, conciseness, and tailoring your responses to the audience. Use examples and evidence to support your points.
+ * **Teamwork and collaboration:** Highlight your teamwork skills through specific examples and demonstrate your ability to work effectively with others.
+ * **Problem-solving and critical thinking:** Clearly explain your problem-solving approach and thought process. Show your ability to analyze information and arrive at logical solutions.
+ * **Time management and organization:** Emphasize your ability to manage time effectively and stay organized during challenging situations.
+ * **Adaptability and resilience:** Demonstrate your ability to adapt to new situations and overcome challenges. Share examples of how you have handled unexpected situations or setbacks in the past.
+ **Remember:** This is just a starting point. Customize the feedback based on the specific strengths and weaknesses identified in your interview.
+ """
+ report_lines.append(improvement_content)
+ report_text = "".join(report_lines)
+ return report_text
+ except Exception as e:
+ error_msg = f"Error generating evaluation report: {e}"
+ print(error_msg)
+ return error_msg
+
+ # --- Gradio UI Components and Logic (Interview) ---
+
400
  def process_resume(file_obj):
401
  """Handles resume upload and processing."""
 
402
  if not file_obj:
403
  return (
404
  "Please upload a PDF resume.",
 
408
  gr.update(visible=False), gr.update(visible=False),
409
  gr.update(visible=False), gr.update(visible=False),
410
  gr.update(visible=False), gr.update(visible=False),
411
+ gr.update(visible=False)
412
  )
413
 
414
  try:
 
415
  if hasattr(file_obj, 'name'):
416
  file_path = file_obj.name
417
  else:
418
+ file_path = str(file_obj)
 
 
419
 
420
  raw_text = file_processing(file_path)
421
  if not raw_text or not raw_text.strip():
 
427
  gr.update(visible=False), gr.update(visible=False),
428
  gr.update(visible=False), gr.update(visible=False),
429
  gr.update(visible=False), gr.update(visible=False),
430
+ gr.update(visible=False)
431
  )
432
 
433
  processed_data = getallinfo(raw_text)
 
434
  return (
435
  f"File processed successfully!",
436
+ gr.update(visible=True), gr.update(visible=True),
437
+ gr.update(visible=False), gr.update(visible=False),
438
+ gr.update(visible=False), gr.update(visible=False),
439
+ gr.update(visible=False), gr.update(visible=False),
440
+ gr.update(visible=False), gr.update(visible=False),
441
+ gr.update(visible=False), gr.update(visible=False),
442
+ processed_data
 
 
 
 
 
443
  )
444
  except Exception as e:
445
  error_msg = f"Error processing file: {str(e)}"
 
452
  gr.update(visible=False), gr.update(visible=False),
453
  gr.update(visible=False), gr.update(visible=False),
454
  gr.update(visible=False), gr.update(visible=False),
455
+ gr.update(visible=False)
456
+ )
457
+
458
+ def start_interview(roles, processed_resume_data):
459
+ """Starts the interview process."""
460
+ if not roles or (isinstance(roles, list) and not any(roles)) or not processed_resume_data or not processed_resume_data.strip():
461
+ return (
462
+ "Please select a role and ensure resume is processed.",
463
+ "", [], [], {}, {},
464
+ gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
465
+ gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
466
+ gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
467
+ gr.update(visible=False), {}
468
  )
469
 
470
+ try:
471
+ questions = generate_questions(roles, processed_resume_data)
472
+ initial_question = questions[0] if questions else "Could you please introduce yourself?"
473
+ interview_state = {
474
+ "questions": questions,
475
+ "current_q_index": 0,
476
+ "answers": [],
477
+ "feedback": [],
478
+ "interactions": {},
479
+ "metrics_list": [],
480
+ "resume_data": processed_resume_data
481
+ }
482
+ return (
483
+ "Interview started. Please answer the first question.",
484
+ initial_question,
485
+ questions,
486
+ [], {}, {},
487
+ gr.update(visible=True), gr.update(visible=True), gr.update(visible=True),
488
+ gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
489
+ gr.update(visible=True), gr.update(visible=True),
490
+ interview_state
491
+ )
492
+ except Exception as e:
493
+ error_msg = f"Error starting interview: {str(e)}"
494
+ print(error_msg)
495
+ return (
496
+ error_msg,
497
+ "", [], [], {}, {},
498
+ gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
499
+ gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
500
+ gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
501
+ gr.update(visible=False), {}
502
+ )
503
 
504
  def submit_answer(audio, interview_state):
505
  """Handles submitting an answer via audio."""
506
  if not audio or not interview_state:
507
+ return (
508
+ "No audio recorded or interview not started.",
509
+ "", interview_state,
510
+ gr.update(visible=False), gr.update(visible=False),
511
+ gr.update(visible=False), gr.update(visible=False),
512
+ gr.update(visible=True), gr.update(visible=True), gr.update(visible=True),
513
+ gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)
514
+ )
515
 
516
  try:
 
517
  temp_dir = tempfile.mkdtemp()
518
  audio_file_path = os.path.join(temp_dir, "recorded_audio.wav")
 
519
  sample_rate, audio_data = audio
 
520
  sf.write(audio_file_path, audio_data, sample_rate)
521
 
 
522
  r = sr.Recognizer()
523
  with sr.AudioFile(audio_file_path) as source:
524
  audio_data_sr = r.record(source)
525
  answer_text = r.recognize_google(audio_data_sr)
526
  print(f"Recognized Answer: {answer_text}")
527
 
 
528
  os.remove(audio_file_path)
529
  os.rmdir(temp_dir)
530
 
 
531
  interview_state["answers"].append(answer_text)
532
  current_q_index = interview_state["current_q_index"]
533
  current_question = interview_state["questions"][current_q_index]
534
  interview_state["interactions"][f"Q{current_q_index + 1}: {current_question}"] = f"A{current_q_index + 1}: {answer_text}"
535
 
 
536
  percent_str = generate_feedback(current_question, answer_text)
537
  try:
538
  percent = float(percent_str)
 
543
  interview_state["feedback"].append(feedback_text)
544
 
545
  metrics = generate_metrics(interview_state["resume_data"], answer_text, current_question)
546
+ interview_state["metrics_list"].append(metrics)
547
 
 
548
  interview_state["current_q_index"] += 1
549
 
550
  return (
551
  f"Answer submitted: {answer_text}",
552
  answer_text,
553
  interview_state,
554
+ gr.update(visible=True), gr.update(value=feedback_text, visible=True),
555
+ gr.update(visible=True), gr.update(value=metrics, visible=True),
556
+ gr.update(visible=True), gr.update(visible=True), gr.update(visible=True),
557
+ gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)
 
 
 
 
 
 
558
  )
559
 
560
  except Exception as e:
561
  print(f"Error processing audio answer: {e}")
562
+ return (
563
+ "Error processing audio. Please try again.",
564
+ "", interview_state,
565
+ gr.update(visible=False), gr.update(visible=False),
566
+ gr.update(visible=False), gr.update(visible=False),
567
+ gr.update(visible=True), gr.update(visible=True), gr.update(visible=True),
568
+ gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)
569
+ )
570
 
571
  def next_question(interview_state):
572
  """Moves to the next question or ends the interview."""
573
  if not interview_state:
574
+ return (
575
+ "Interview not started.",
576
+ "", interview_state,
577
+ gr.update(visible=True), gr.update(visible=True), gr.update(visible=True),
578
+ gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
579
+ gr.update(visible=False), gr.update(visible=True), gr.update(visible=True),
580
+ gr.update(visible=False), gr.update(visible=False)
581
+ )
582
 
583
  current_q_index = interview_state["current_q_index"]
584
  total_questions = len(interview_state["questions"])
 
589
  f"Question {current_q_index + 1}/{total_questions}",
590
  next_q,
591
  interview_state,
592
+ gr.update(visible=True), gr.update(visible=True), gr.update(visible=True),
593
+ gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
594
+ gr.update(visible=False), gr.update(visible=True), gr.update(visible=True),
595
+ "", {}
 
 
 
 
 
 
596
  )
597
  else:
 
598
  return (
599
  "Interview completed! Click 'Submit Interview' to see your evaluation.",
600
  "Interview Finished",
601
  interview_state,
602
+ gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
603
+ gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),
604
+ gr.update(visible=True), gr.update(visible=True), gr.update(visible=False),
605
+ "", {}
 
 
 
 
 
 
606
  )
607
 
608
def submit_interview(interview_state):
+ """Handles final submission, triggers evaluation, and prepares results."""
+ if not interview_state or not isinstance(interview_state, dict):
+ return (
+ "Interview state is missing or invalid.",
+ interview_state,
+ gr.update(visible=False), gr.update(visible=False), "", None
+ )

+ try:
+ print("Interview submitted for evaluation.")
+ interactions = interview_state.get("interactions", {})
+ resume_data = interview_state.get("resume_data", "")
+ feedback_list = interview_state.get("feedback", [])
+ metrics_history = interview_state.get("metrics_list", [])
+
+ if not interactions:
+ error_msg = "No interview interactions found to evaluate."
+ print(error_msg)
+ return (
+ error_msg,
+ interview_state,
+ gr.update(visible=False), gr.update(visible=False), "", None
+ )
+
+ raw_metrics_text = getmetrics(interactions, resume_data)
+ print(f"Raw Metrics Text:\n{raw_metrics_text}")
+ final_metrics = parse_metrics(raw_metrics_text)
+ print(f"Parsed Metrics: {final_metrics}")
+
+ if final_metrics:
+ average_rating = sum(final_metrics.values()) / len(final_metrics)
+ else:
+ average_rating = 0.0

+ report_text = generate_evaluation_report(final_metrics, average_rating, feedback_list, interactions)
+ print("Evaluation report generated.")
+ chart_buffer = create_metrics_chart(final_metrics)
+ print("Evaluation chart generated.")
+
+ return (
+ "Evaluation Complete! See your results below.",
+ interview_state,
+ gr.update(visible=True, value=report_text),
+ gr.update(visible=True, value=chart_buffer)
+ )
+ except Exception as e:
+ error_msg = f"Error during evaluation submission: {str(e)}"
+ print(error_msg)
+ import traceback
+ traceback.print_exc()
+ return (
+ error_msg,
+ interview_state,
+ gr.update(visible=True, value=error_msg),
+ gr.update(visible=False)
+ )
+
 
666
  # --- Login and Navigation Logic (Firebase Integrated) ---
667
 
668
  def login(email, password):
 
669
  if not FIREBASE_AVAILABLE:
670
+ return (
671
+ "Firebase not initialized. Login unavailable.",
672
+ gr.update(visible=True), gr.update(visible=False), gr.update(visible=False),
673
+ "", "", "", ""
674
+ )
675
  if not email or not password:
676
+ return (
677
+ "Please enter email and password.",
678
+ gr.update(visible=True), gr.update(visible=False), gr.update(visible=False),
679
+ email, password, "", ""
680
+ )
681
  try:
 
 
 
682
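+        # NOTE: auth.get_user_by_email only looks the account up; the Firebase Admin SDK
+        # does not verify the supplied password here.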
         user = auth.get_user_by_email(email)
+        welcome_msg = f"Welcome, {user.display_name or user.uid}!"
+        return (
+            welcome_msg,
+            gr.update(visible=False), gr.update(visible=False), gr.update(visible=True),
+            "", "", user.uid, user.email
+        )
     except auth.UserNotFoundError:
+        return (
+            "User not found. Please check your email or sign up.",
+            gr.update(visible=True), gr.update(visible=False), gr.update(visible=False),
+            email, password, "", ""
+        )
     except Exception as e:
         error_msg = f"Login failed: {str(e)}"
         print(error_msg)
+        return (
+            error_msg,
+            gr.update(visible=True), gr.update(visible=False), gr.update(visible=False),
+            email, password, "", ""
+        )

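+# signup uses the chosen username as both the Firebase UID and display name,
+# which is why UidAlreadyExistsError is handled separately below.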
 def signup(email, password, username):
+    if not FIREBASE_AVAILABLE:
+        return (
+            "Firebase not initialized. Signup unavailable.",
+            gr.update(visible=True), gr.update(visible=False), gr.update(visible=False),
+            gr.update(visible=False), "", "", "", "", ""
+        )
     if not email or not password or not username:
+        return (
+            "Please fill all fields.",
+            gr.update(visible=False), gr.update(visible=True), gr.update(visible=False),
+            gr.update(visible=False), email, password, username, "", ""
+        )
     try:
         user = auth.create_user(email=email, password=password, uid=username, display_name=username)
         success_msg = f"Account created successfully for {username}!"
+        return (
+            success_msg,
+            gr.update(visible=True), gr.update(visible=False), gr.update(visible=False),
+            gr.update(visible=False), "", "", "", user.uid, user.email
+        )
     except auth.UidAlreadyExistsError:
+        return (
+            "Username already exists. Please choose another.",
+            gr.update(visible=False), gr.update(visible=True), gr.update(visible=False),
+            gr.update(visible=False), email, password, username, "", ""
+        )
     except auth.EmailAlreadyExistsError:
+        return (
+            "Email already exists. Please use another email.",
+            gr.update(visible=False), gr.update(visible=True), gr.update(visible=False),
+            gr.update(visible=False), email, password, username, "", ""
+        )
     except Exception as e:
         error_msg = f"Signup failed: {str(e)}"
         print(error_msg)
+        return (
+            error_msg,
+            gr.update(visible=False), gr.update(visible=True), gr.update(visible=False),
+            gr.update(visible=False), email, password, username, "", ""
+        )

 def logout():
+    return (
+        "",
+        gr.update(visible=True), gr.update(visible=False), gr.update(visible=False),
+        gr.update(visible=False), "", "", "", "", ""
+    )

 def navigate_to_interview():
+    return (gr.update(visible=True), gr.update(visible=False))

 def navigate_to_chat():
+    return (gr.update(visible=False), gr.update(visible=True))

 # --- Import Chat Module Functions ---
 try:
     from login_module import chat as chat_module
     CHAT_MODULE_AVAILABLE = True

     chat_module = None

 # --- Gradio Interface ---
 
770
  with gr.Blocks(title="PrepGenie - Mock Interview") as demo:
771
  gr.Markdown("# 🦈 PrepGenie")
 
772
  interview_state = gr.State({})
 
773
  user_state = gr.State("")
 
774
  user_email_state = gr.State("")
 
775
  processed_resume_data_state = gr.State("")
776
 
777
  # --- Login Section ---
 
781
  login_password_input = gr.Textbox(label="Password", type="password")
782
  login_btn = gr.Button("Login")
783
  login_status = gr.Textbox(label="Login Status", interactive=False)
 
784
  switch_to_signup_btn = gr.Button("Don't have an account? Sign Up")
785
 
786
  # --- Signup Section ---
 
791
  signup_username_input = gr.Textbox(label="Unique Username")
792
  signup_btn = gr.Button("Create my account")
793
  signup_status = gr.Textbox(label="Signup Status", interactive=False)
 
794
  switch_to_login_btn = gr.Button("Already have an account? Login")
795
 
796
+ # --- Main App Sections ---
797
  with gr.Column(visible=False) as main_app:
798
  with gr.Row():
799
  with gr.Column(scale=1):
800
  logout_btn = gr.Button("Logout")
801
  with gr.Column(scale=4):
 
802
  welcome_display = gr.Markdown("### Welcome, User!")
803
 
804
  with gr.Row():
 
812
  # --- Interview Section ---
813
  with gr.Column(visible=False) as interview_selection:
814
  gr.Markdown("## Mock Interview")
 
815
  with gr.Row():
816
  with gr.Column():
817
  file_upload_interview = gr.File(label="Upload Resume (PDF)", file_types=[".pdf"])
 
819
  with gr.Column():
820
  file_status_interview = gr.Textbox(label="Status", interactive=False)
821
 
 
822
  role_selection = gr.Dropdown(
823
  choices=["Data Scientist", "Software Engineer", "Product Manager", "Data Analyst", "Business Analyst"],
824
+ multiselect=True, label="Select Job Role(s)", visible=False
 
 
825
  )
826
  start_interview_btn = gr.Button("Start Interview", visible=False)
 
 
827
  question_display = gr.Textbox(label="Question", interactive=False, visible=False)
828
  answer_instructions = gr.Markdown("Click 'Record Answer' and speak your response.", visible=False)
829
  audio_input = gr.Audio(label="Record Answer", type="numpy", visible=False)
830
  submit_answer_btn = gr.Button("Submit Answer", visible=False)
831
  next_question_btn = gr.Button("Next Question", visible=False)
832
  submit_interview_btn = gr.Button("Submit Interview", visible=False, variant="primary")
 
 
833
  answer_display = gr.Textbox(label="Your Answer", interactive=False, visible=False)
834
  feedback_display = gr.Textbox(label="Feedback", interactive=False, visible=False)
835
  metrics_display = gr.JSON(label="Metrics", visible=False)
 
 
836
  processed_resume_data_hidden_interview = gr.Textbox(visible=False)
837
 
838
+ # --- Evaluation Results Section ---
839
+ with gr.Column(visible=False) as evaluation_selection:
840
+ gr.Markdown("## Interview Evaluation Results")
841
+ evaluation_report_display = gr.Markdown(label="Your Evaluation Report", visible=False)
842
+ evaluation_chart_display = gr.Image(label="Skills Breakdown", type="pil", visible=False)
843
+
844
  # --- Chat Section ---
845
  if CHAT_MODULE_AVAILABLE:
846
  with gr.Column(visible=False) as chat_selection:
847
  gr.Markdown("## Chat with Resume")
 
848
  with gr.Row():
849
  with gr.Column():
850
  file_upload_chat = gr.File(label="Upload Resume (PDF)", file_types=[".pdf"])
851
  process_chat_btn = gr.Button("Process Resume")
852
  with gr.Column():
853
  file_status_chat = gr.Textbox(label="Status", interactive=False)
854
+ chatbot = gr.Chatbot(label="Chat History", visible=False, type="messages") # Updated type
 
 
855
  query_input = gr.Textbox(label="Ask about your resume", placeholder="Type your question here...", visible=False)
856
  send_btn = gr.Button("Send", visible=False)
857
  else:
 
859
  gr.Markdown("## Chat with Resume (Unavailable)")
860
  gr.Textbox(value="Chat module is not available.", interactive=False)
861
 
 
 
862
  interview_view = interview_selection
863
  chat_view = chat_selection
 
864
  interview_btn.click(fn=navigate_to_interview, inputs=None, outputs=[interview_view, chat_view])
865
  if CHAT_MODULE_AVAILABLE:
866
  chat_btn.click(fn=navigate_to_chat, inputs=None, outputs=[interview_view, chat_view])
 
 
 
 
867
 
868
  # --- Event Listeners for Interview ---
 
869
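+    # process_resume writes to the status box, the role selector and Start button,
+    # the interview widgets, and the hidden textbox holding the parsed resume text.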
     process_btn_interview.click(
         fn=process_resume,
         inputs=[file_upload_interview],
         outputs=[
+            file_status_interview, role_selection, start_interview_btn,
+            question_display, answer_instructions, audio_input,
+            submit_answer_btn, next_question_btn, submit_interview_btn,
+            answer_display, feedback_display, metrics_display,
+            processed_resume_data_hidden_interview
+        ]
     )

     start_interview_btn.click(
         fn=start_interview,
         inputs=[role_selection, processed_resume_data_hidden_interview],
         outputs=[
             file_status_interview, question_display,
+            interview_state["questions"], interview_state["answers"],
+            interview_state["interactions"], interview_state["metrics_list"],
             audio_input, submit_answer_btn, next_question_btn,
             submit_interview_btn, feedback_display, metrics_display,
+            question_display, answer_instructions,
+            interview_state
         ]
     )

     submit_answer_btn.click(
         fn=submit_answer,
         inputs=[audio_input, interview_state],
         outputs=[
             file_status_interview, answer_display, interview_state,
+            feedback_display, feedback_display,
+            metrics_display, metrics_display,
             audio_input, submit_answer_btn, next_question_btn,
             submit_interview_btn, question_display, answer_instructions
         ]
     )

     next_question_btn.click(
         fn=next_question,
         inputs=[interview_state],

             audio_input, submit_answer_btn, next_question_btn,
             feedback_display, metrics_display, submit_interview_btn,
             question_display, answer_instructions,
+            answer_display, metrics_display
         ]
     )

     submit_interview_btn.click(
         fn=submit_interview,
         inputs=[interview_state],
+        outputs=[
+            file_status_interview,
+            interview_state,
+            evaluation_report_display,
+            evaluation_chart_display
+        ]
     )

+    # --- Event Listeners for Chat ---
     if CHAT_MODULE_AVAILABLE:
         process_chat_btn.click(
             fn=chat_module.process_resume_chat,
             inputs=[file_upload_chat],
             outputs=[file_status_chat, processed_resume_data_state, query_input, send_btn, chatbot]
         )
         send_btn.click(
             fn=chat_module.chat_with_resume,
+            inputs=[query_input, processed_resume_data_state, chatbot],
+            outputs=[query_input, chatbot]
         )
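+        # Pressing Enter in the query box mirrors the Send button wiring.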
+        query_input.submit(
             fn=chat_module.chat_with_resume,
             inputs=[query_input, processed_resume_data_state, chatbot],
             outputs=[query_input, chatbot]
         )

     login_btn.click(
         fn=login,
         inputs=[login_email_input, login_password_input],
+        outputs=[login_status, login_section, signup_section, main_app,
+                 login_email_input, login_password_input, user_state, user_email_state]
     )

     signup_btn.click(
         fn=signup,
         inputs=[signup_email_input, signup_password_input, signup_username_input],
+        outputs=[signup_status, login_section, signup_section, main_app,
+                 signup_email_input, signup_password_input, signup_username_input,
+                 user_state, user_email_state]
     )

     logout_btn.click(
         fn=logout,
         inputs=None,
+        outputs=[login_status, login_section, signup_section, main_app,
+                 login_email_input, login_password_input, signup_username_input,
+                 user_state, user_email_state]
     )

     switch_to_signup_btn.click(
         fn=lambda: (gr.update(visible=False), gr.update(visible=True)),
         inputs=None,

 # Run the app
 if __name__ == "__main__":
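+    # 0.0.0.0 binds all interfaces; 7860 is the port Hugging Face Spaces expects by default.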
+    demo.launch(server_name="0.0.0.0", server_port=7860, share=False)