ibraheem007 committed on
Commit
20fbf2f
·
verified ·
1 Parent(s): 2b5f7c8

Upload 4 files

Browse files
components/output_renderer.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import re
3
+
4
def render_output_section():
    """Render the generated output section - FIXED for regenerated content.

    Reads ``st.session_state.generated_output`` and, when it is non-empty,
    delegates to :func:`render_output`. Rendering unconditionally (instead of
    gating on a regeneration flag) guarantees freshly regenerated content is
    always displayed.
    """
    if st.session_state.generated_output:
        # ALWAYS render the current content, regardless of regeneration status
        render_output(st.session_state.generated_output)
9
+
10
def render_output(output):
    """Render output with LaTeX support.

    Args:
        output: Generated markdown/text, possibly containing LaTeX spans
            delimited by ``$$...$$``, ``\\[...\\]`` or ``\\(...\\)``.
    """
    st.markdown("---")
    st.markdown("### πŸ“˜ Generated Content")

    # Extract and render LaTeX; returns the text with rendered spans removed
    output = render_latex_expressions(output)

    # Render remaining text; HTML is allowed for model-emitted formatting
    st.markdown(output.strip(), unsafe_allow_html=True)
20
+
21
def render_latex_expressions(output):
    """Extract and render LaTeX expressions from output.

    Finds ``$$...$$``, ``\\[...\\]`` and ``\\(...\\)`` spans, renders each
    with ``st.latex`` (falling back to inline code on failure), and returns
    the remaining text with every rendered span removed.

    Args:
        output: Text possibly containing LaTeX expressions.

    Returns:
        str: The input with all rendered LaTeX spans stripped out.
    """
    latex_patterns = re.findall(r"\$\$(.+?)\$\$|\\\[(.+?)\\\]|\\\((.+?)\\\)", output, re.DOTALL)

    for groups in latex_patterns:
        # Exactly one alternative matched; the other groups are empty strings.
        # FIX: guard next() with a default instead of risking StopIteration.
        latex_expr = next(filter(None, groups), None)
        if latex_expr is None:
            continue
        try:
            st.latex(latex_expr.strip())
        except Exception:  # FIX: bare except also swallowed SystemExit/KeyboardInterrupt
            st.markdown(f"`{latex_expr}`")

        # FIX: clean THIS match from the text inside the loop; previously the
        # cleanup only used the `groups` value left over after the loop, so
        # only the last match was reliably removed from the text.
        for g in groups:
            if g:
                output = output.replace(f"$${g}$$", "").replace(f"\\[{g}\\]", "").replace(f"\\({g}\\)", "")

    return output
components/student_flow.py ADDED
@@ -0,0 +1,439 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import re
3
+ from generator import model_manager
4
+ from components.file_processor import get_student_content_input
5
+ from components.export_handler import generate_pdf
6
+ from components.session_manager import update_session_state
7
+
8
def render_student_flow():
    """Render the student content generation flow.

    Entry point for the student tab: shows the intro banner, short-circuits
    into the regeneration path when a model switch was requested (and the
    original inputs were preserved), otherwise collects content, level and
    context and generates on button press.
    """
    st.header("πŸŽ“ Student Learning Assistant")
    render_student_info()

    # Check if we need to regenerate with a different model; requires the
    # original content to have been preserved in session state.
    if st.session_state.get("regenerate_with_new_model") and st.session_state.get("original_content_text"):
        handle_student_regeneration()
        return

    content_text, filename = get_student_content_input()
    student_level = get_student_level()
    specific_help = get_student_context()

    if content_text and st.button("πŸš€ Simplify This Content", type="primary", key="generate_student_content"):
        generate_student_content(content_text, student_level, specific_help, filename)
24
+
25
def handle_student_regeneration():
    """Handle student content regeneration with new model.

    Re-runs generation using the inputs preserved in session state by
    generate_student_content(); shows the original inputs read-only so the
    user can see what is being regenerated.
    """
    st.header("πŸŽ“ Student Learning Assistant")
    render_student_info()

    # Show we're regenerating
    st.info("πŸ”„ Regenerating your content with the new model...")

    # Get preserved inputs (stored by generate_student_content on first run)
    content_text = st.session_state.original_content_text
    student_level = st.session_state.student_level
    specific_help = st.session_state.get("original_specific_help", "")
    filename = st.session_state.get("original_filename", "regenerated_content.pdf")

    # Show original inputs for context
    with st.expander("πŸ“‹ Original Inputs (Read-only)", expanded=True):
        st.write(f"**Student Level:** {student_level}")
        st.write(f"**Content Length:** {len(content_text)} characters")
        if specific_help:
            st.write(f"**Specific Help Requested:** {specific_help}")

    # Regenerate the content
    generate_student_content(content_text, student_level, specific_help, filename)
48
+
49
def render_student_info():
    """Render the explanatory banner for the student flow."""
    st.info("""
**How this works:**
Upload your course material or paste difficult content, and I'll generate a simplified,
easy-to-understand version tailored to your level.
""")
56
+
57
def get_student_level():
    """Get student's academic level.

    Returns:
        str: One of the fixed level options; used to tailor the prompts.
    """
    st.subheader("🎯 Your Learning Level")
    return st.selectbox(
        "What's your current academic level?",
        ["High School", "Undergraduate (1st-2nd year)", "Undergraduate (3rd-4th year)", "Masters", "PhD"],
        help="This helps me tailor the explanation to your level",
        key="student_level_select"
    )
66
+
67
def get_student_context():
    """Get additional context from student.

    Returns:
        str: Free-text description of what the student is struggling with;
        may be empty since the field is optional.
    """
    st.subheader("πŸ’‘ Additional Context (Optional)")
    return st.text_area(
        "What specifically are you struggling with?",
        placeholder="e.g., 'I don't understand backpropagation' or 'The math notation is confusing'",
        help="Tell me what's confusing you for better help",
        key="student_context_input"
    )
76
+
77
def generate_student_content(content_text, student_level, specific_help, filename):
    """Generate content for student with optimized content handling.

    Stores the raw inputs in session state first so a later model switch can
    regenerate, then dispatches by content length: >15000 chars is chunked,
    >8000 uses the large-content path, otherwise a single normal request.

    Args:
        content_text: The raw material to simplify.
        student_level: Target academic level string.
        specific_help: Optional note about what confuses the student.
        filename: Name used for exports / regeneration bookkeeping.
    """
    # STORE ORIGINAL CONTENT FOR REGENERATION
    st.session_state.original_content_text = content_text
    st.session_state.original_specific_help = specific_help
    st.session_state.original_filename = filename

    if len(content_text) > 15000:
        st.info("πŸ“ Your content is quite comprehensive. I'll process it in sections for optimal quality...")
        generate_chunked_content(content_text, student_level, specific_help, filename)
    elif len(content_text) > 8000:
        st.info("πŸ“ Processing your content with optimized models...")
        generate_single_large_content(content_text, student_level, specific_help, filename)
    else:
        generate_single_content(content_text, student_level, specific_help, filename)
92
+
93
def generate_single_content(content_text, student_level, specific_help, filename):
    """Generate content for normal-sized inputs.

    Builds a model-specific prompt, generates, validates the response,
    produces the PDF export and persists everything to session state before
    triggering a rerun so the output section renders the result.
    """
    with st.spinner("✍️ Creating student-friendly explanation..."):
        selected_model = st.session_state.get("selected_model", "groq")

        # Use Phi-3 specific prompts if Phi-3 is selected
        if selected_model == "phi3":
            prompt = build_phi3_student_prompt(content_text, student_level, specific_help)
        else:
            prompt = build_groq_student_prompt(content_text, student_level, specific_help)

        try:
            output = model_manager.generate(
                prompt,
                selected_model,
                user_type="student",
                student_level=student_level,
                content_type="simplified_explanation"
            )

            # Guard before any string operations on the response
            if output is None:
                st.error("❌ AI service returned no response")
                return

            # Clean the output for Phi-3 specifically
            if selected_model == "phi3":
                output = clean_phi3_output(output)

            # Error responses come back in-band as marker-prefixed strings
            if any(msg in output for msg in ["🚫", "πŸ“Š", "❌", "[Error", "[RateLimit]", "[Quota]", "[Auth]", "[Empty]", "❌ Phi-3 Error:"]):
                st.error(output)
                return

            # Generate PDF first
            pdf_data = generate_pdf(output, "student", level=student_level)

            # Update session state
            update_session_state(
                original_prompt=prompt,
                generated_output=output,
                feedback_given=False,
                regenerated=False,
                content_source="student",
                student_level=student_level,
                original_filename=filename,
                pdf_export_data=pdf_data,
                saved_to_history=False,
                current_history_id=None,
                generated_model=selected_model
            )

            st.rerun()

        except Exception as e:
            st.error(f"❌ Generation failed: {str(e)}")
147
+
148
def generate_single_large_content(content_text, student_level, specific_help, filename):
    """Generate content for large but manageable inputs using high-capacity models.

    Same flow as generate_single_content, but Groq requests are routed
    through the dedicated large-content generator.

    Args:
        content_text: The raw material to simplify (8000-15000 chars).
        student_level: Target academic level string.
        specific_help: Optional note about what confuses the student.
        filename: Name used for exports / regeneration bookkeeping.
    """
    with st.spinner("✍️ Creating comprehensive explanation..."):
        selected_model = st.session_state.get("selected_model", "groq")

        # Use Phi-3 specific prompts if Phi-3 is selected
        if selected_model == "phi3":
            prompt = build_phi3_student_prompt(content_text, student_level, specific_help)
        else:
            prompt = build_groq_student_prompt(content_text, student_level, specific_help)

        try:
            if selected_model == "phi3":
                output = model_manager.generate(
                    prompt,
                    selected_model,
                    user_type="student",
                    student_level=student_level,
                    content_type="simplified_explanation"
                )
            else:
                output = model_manager.groq_generator.generate_large_content(prompt)

            # FIX: generate_single_content guards against a None response; this
            # path previously passed None into clean_phi3_output / the `in`
            # checks and crashed with a TypeError.
            if output is None:
                st.error("❌ AI service returned no response")
                return

            # Clean the output for Phi-3 specifically
            if selected_model == "phi3":
                output = clean_phi3_output(output)

            # Error responses come back in-band as marker-prefixed strings
            if any(msg in output for msg in ["🚫", "πŸ“Š", "❌", "[Error", "[RateLimit]", "[Quota]", "[Auth]", "[Empty]", "❌ Phi-3 Error:"]):
                st.error(output)
                return

            # Generate PDF and update session state
            pdf_data = generate_pdf(output, "student", level=student_level)

            update_session_state(
                original_prompt=prompt,
                generated_output=output,
                feedback_given=False,
                regenerated=False,
                content_source="student",
                student_level=student_level,
                original_filename=filename,
                pdf_export_data=pdf_data,
                saved_to_history=False,
                current_history_id=None,
                generated_model=selected_model
            )

            st.rerun()

        except Exception as e:
            st.error(f"❌ Generation failed: {str(e)}")
197
+
198
def generate_chunked_content(content_text, student_level, specific_help, filename):
    """Generate content for very large inputs by chunking.

    Splits the input into paragraph-aligned chunks, simplifies each one with
    progress feedback, then combines the sections into a single document.
    Aborts with an error as soon as any chunk fails.

    Args:
        content_text: The raw material to simplify (>15000 chars).
        student_level: Target academic level string.
        specific_help: Optional note about what confuses the student.
        filename: Name used for exports / regeneration bookkeeping.
    """
    chunks = chunk_content(content_text, max_chunk_size=8000)

    if not chunks:
        st.error("❌ Unable to process this content. Please try with shorter text.")
        return

    all_outputs = []
    progress_bar = st.progress(0)
    status_text = st.empty()

    selected_model = st.session_state.get("selected_model", "groq")

    for i, chunk in enumerate(chunks):
        status_text.text(f"πŸ“– Processing section {i+1}/{len(chunks)}...")
        progress_bar.progress(i / len(chunks))

        # Use Phi-3 specific prompts if Phi-3 is selected
        if selected_model == "phi3":
            prompt = build_phi3_chunk_prompt(chunk, student_level, specific_help, i+1, len(chunks))
        else:
            prompt = build_groq_chunk_prompt(chunk, student_level, specific_help, i+1, len(chunks))

        try:
            output = model_manager.generate(
                prompt,
                selected_model,
                user_type="student",
                student_level=student_level,
                content_type="simplified_explanation"
            )

            # FIX: guard against a None response before cleaning/inspecting it.
            # The single-content path already does this; here a None response
            # crashed clean_phi3_output / the `in` checks with a TypeError.
            if output is None:
                st.error(f"❌ Failed to process section {i+1}: no response from AI service")
                return

            if selected_model == "phi3":
                output = clean_phi3_output(output)

            # Error responses come back in-band as marker-prefixed strings
            if any(msg in output for msg in ["🚫", "πŸ“Š", "❌", "[Error", "[RateLimit]", "[Quota]", "[Auth]", "[Empty]", "❌ Phi-3 Error:"]):
                st.error(f"❌ Failed to process section {i+1}: {output}")
                return

            all_outputs.append(output)

        except Exception as e:
            st.error(f"❌ Failed to process section {i+1}: {str(e)}")
            return

    # Update progress to complete
    progress_bar.progress(1.0)
    status_text.text("βœ… All sections processed! Combining results...")

    # Combine all outputs
    final_output = combine_chunk_outputs(all_outputs, student_level)

    # Generate PDF and update session state
    pdf_data = generate_pdf(final_output, "student", level=student_level)

    update_session_state(
        original_prompt=f"Simplified content for {student_level} level",
        generated_output=final_output,
        feedback_given=False,
        regenerated=False,
        content_source="student",
        student_level=student_level,
        original_filename=filename,
        pdf_export_data=pdf_data,
        saved_to_history=False,
        current_history_id=None,
        generated_model=selected_model
    )

    status_text.text("βœ… Content generation complete!")
    st.rerun()
270
+
271
def chunk_content(content, max_chunk_size=8000):
    """Split content into manageable chunks of at most max_chunk_size chars.

    Splits on blank lines (paragraph boundaries) and packs consecutive
    paragraphs together. FIX: a single paragraph longer than max_chunk_size
    was previously emitted as one oversized chunk, defeating the limit; such
    paragraphs are now hard-split so every returned chunk respects the cap.

    Args:
        content: Full text to split.
        max_chunk_size: Soft upper bound on chunk length in characters.

    Returns:
        list[str]: Stripped chunks in original order.
    """
    paragraphs = re.split(r'\n\s*\n', content)
    chunks = []
    current_chunk = ""

    for paragraph in paragraphs:
        if len(paragraph) > max_chunk_size:
            # Flush the accumulator, then hard-split the oversized paragraph.
            if current_chunk:
                chunks.append(current_chunk.strip())
                current_chunk = ""
            for start in range(0, len(paragraph), max_chunk_size):
                piece = paragraph[start:start + max_chunk_size].strip()
                if piece:
                    chunks.append(piece)
        elif len(current_chunk) + len(paragraph) < max_chunk_size:
            current_chunk += paragraph + "\n\n"
        else:
            if current_chunk:
                chunks.append(current_chunk.strip())
            current_chunk = paragraph + "\n\n"

    if current_chunk:
        chunks.append(current_chunk.strip())

    return chunks
289
+
290
def build_phi3_chunk_prompt(chunk, student_level, specific_help, chunk_num, total_chunks):
    """Build Phi-3 specific prompt for a single chunk - STRICTER VERSION.

    Args:
        chunk: The content section to simplify.
        student_level: Target academic level string.
        specific_help: Optional student-supplied confusion note.
        chunk_num: 1-based index of this section.
        total_chunks: Total number of sections.

    Returns:
        str: The assembled prompt.
    """
    prompt = f"""TASK: Write the actual simplified explanation for this content section.

CONTENT SECTION {chunk_num}/{total_chunks}:
{chunk}

STUDENT: {student_level}
"""
    if specific_help:
        prompt += f"\nSTUDENT'S REQUEST: {specific_help}"

    prompt += f"""

DIRECTIVES:
- WRITE THE ACTUAL EXPLANATION ONLY
- Use simple, clear language for {student_level}
- Break complex ideas into basic concepts
- Use everyday examples and analogies
- Define technical terms in simple words
- Structure with clear headings

STRICT PROHIBITIONS:
- NO instructional language (no "I will explain", "This section describes")
- NO meta-commentary about the explanation
- NO learning objectives or activities
- NO phrases like "students will understand"
- NO references to yourself as AI/assistant

BEGIN EXPLANATION NOW:

"""
    return prompt
323
+
324
def build_groq_chunk_prompt(chunk, student_level, specific_help, chunk_num, total_chunks):
    """Build Groq prompt for a single chunk.

    Lighter instruction set than the Phi-3 variant; same inputs.

    Args:
        chunk: The content section to simplify.
        student_level: Target academic level string.
        specific_help: Optional student-supplied confusion note.
        chunk_num: 1-based index of this section.
        total_chunks: Total number of sections.

    Returns:
        str: The assembled prompt.
    """
    prompt = f"""Simplify this content for {student_level} students:

CONTENT SECTION {chunk_num}/{total_chunks}:
{chunk}

STUDENT LEVEL: {student_level}
"""
    if specific_help:
        prompt += f"\nSPECIFIC CONFUSION: {specific_help}"

    prompt += f"""

Create a clear, simplified explanation of this content.
"""
    return prompt
341
+
342
def combine_chunk_outputs(outputs, student_level):
    """Stitch per-chunk outputs into one cohesive markdown document.

    Each section is re-cleaned of leftover instructional language, numbered
    as "Part N", and separated by a horizontal rule.
    """
    parts = [f"# Simplified Content for {student_level} Level\n\n"]

    for part_no, raw in enumerate(outputs, start=1):
        # Strip any lingering meta-commentary that slipped through per-chunk cleaning.
        body = clean_phi3_output(raw).strip()
        parts.append(f"## Part {part_no}\n\n{body}\n\n---\n\n")

    return "".join(parts)
352
+
353
def build_phi3_student_prompt(content_text, student_level, specific_help):
    """Build Phi-3 specific prompt for student content generation - STRICTER VERSION.

    Args:
        content_text: Full material to simplify.
        student_level: Target academic level string.
        specific_help: Optional student-supplied confusion note.

    Returns:
        str: The assembled prompt.
    """
    prompt = f"""TASK: Write the actual simplified explanation for this content.

ORIGINAL CONTENT:
{content_text}

STUDENT: {student_level}
"""
    if specific_help:
        prompt += f"\nSTUDENT'S SPECIFIC REQUEST: {specific_help}"

    prompt += f"""

DIRECTIVES:
- WRITE THE ACTUAL EXPLANATION ONLY
- Use simple, clear language appropriate for {student_level}
- Break down complex concepts into basic building blocks
- Use everyday analogies and concrete examples
- Define all technical terms when first used
- Structure logically with clear headings
- Make it engaging and conversational

STRICT PROHIBITIONS:
- NO instructional language (no "I will explain", "Let me break this down")
- NO meta-commentary about the explanation process
- NO learning objectives, activities, or assessments
- NO phrases like "students will learn" or "this explains"
- NO lesson plans or educational frameworks
- NO references to yourself as AI, assistant, or teacher

BEGIN SIMPLIFIED EXPLANATION NOW:

"""
    return prompt
388
+
389
def build_groq_student_prompt(content_text, student_level, specific_help):
    """Build Groq prompt for student content generation.

    Args:
        content_text: Full material to simplify.
        student_level: Target academic level string.
        specific_help: Optional student-supplied confusion note.

    Returns:
        str: The assembled prompt.
    """
    prompt = f"""Create a simplified explanation of this content for {student_level} students:

CONTENT TO SIMPLIFY:
{content_text}

STUDENT LEVEL: {student_level}
"""
    if specific_help:
        prompt += f"\nSPECIFIC CONFUSION: {specific_help}"

    prompt += f"""

Provide a clear, easy-to-understand explanation that:
- Breaks down complex concepts into simple terms
- Uses analogies and examples appropriate for {student_level}
- Defines technical terminology clearly
- Structures the content for easy learning
- Focuses on the most important concepts

Make the explanation engaging and accessible.
"""
    return prompt
413
+
414
def clean_phi3_output(output):
    """Strip instructional language and meta-commentary from Phi-3 output.

    Removes known boilerplate phrases case-insensitively, then trims leading
    blank lines and surrounding whitespace.
    """
    # Phrases the model tends to emit despite prompt prohibitions.
    boilerplate = (
        r'Here is.*?explanation:',
        r'I will.*?now:',
        r'Let me.*?concept:',
        r'This section.*?content:',
        r'Below is.*?explanation:',
        r'Here\'s.*?breakdown:',
        r'In this.*?we will',
        r'Students will.*?understand',
        r'We can.*?explain',
        r'The following.*?explains',
        r'This content.*?describes',
        r'As an AI.*?assistant',
    )

    result = output
    for phrase in boilerplate:
        result = re.sub(phrase, '', result, flags=re.IGNORECASE)

    # Drop blank lines left behind at the start by the removals.
    result = re.sub(r'^\s*\n+', '', result)

    return result.strip()
components/tutor_flow.py ADDED
@@ -0,0 +1,804 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import re
3
+ from generator import model_manager
4
+ from components.export_handler import generate_pdf
5
+ from components.session_manager import update_session_state
6
+ from components.file_processor import process_uploaded_file # ADD THIS IMPORT
7
+
8
def render_tutor_flow():
    """Render the tutor content generation flow.

    Entry point for the tutor tab: handles model-switch regeneration first
    (when original inputs were preserved), then branches on the chosen input
    method (document upload vs. manual topic description).
    """
    st.header("πŸ‘¨β€πŸ« Tutor Content Creator")
    render_tutor_info()

    # Check if we need to regenerate with a different model
    if st.session_state.get("regenerate_with_new_model") and st.session_state.get("original_topic"):
        handle_tutor_regeneration()
        return

    # GET INPUT METHOD FIRST
    input_method = get_tutor_input_method()

    if input_method == "Upload Document":
        topic, objectives, student_level, content_type, additional_req, document_text, filename = get_tutor_inputs_from_document()
        if topic and document_text and st.button("πŸš€ Generate from Document", type="primary", key="generate_from_document"):
            generate_tutor_content_from_document(topic, objectives, student_level, content_type, additional_req, document_text, filename)
    else:
        topic, objectives, student_level, content_type, additional_req = get_tutor_inputs_manual()
        if topic and objectives and st.button("πŸš€ Create Teaching Content", type="primary", key="generate_tutor_content"):
            generate_tutor_content(topic, objectives, student_level, content_type, additional_req)
29
+
30
def get_tutor_input_method():
    """Let tutor choose between manual input or document upload.

    Returns:
        str: Either "Describe Topic & Objectives" or "Upload Document".
    """
    st.subheader("πŸ“₯ Input Method")
    return st.radio(
        "How would you like to provide the content?",
        ["Describe Topic & Objectives", "Upload Document"],
        help="Choose to either describe what you need or upload existing materials to transform"
    )
38
+
39
def get_tutor_inputs_from_document():
    """Get inputs from tutor when uploading a document.

    Returns:
        tuple: (topic, objectives, student_level, content_type,
        additional_req, document_text, filename). ``document_text`` is ""
        when no file was uploaded or extraction failed; ``filename``
        defaults to "content.pdf" in that case.
    """
    st.subheader("πŸ“„ Upload Your Document")

    uploaded_file = st.file_uploader(
        "Upload your educational document",
        type=["pdf", "pptx", "docx", "txt"],
        help="Upload lesson plans, curriculum materials, textbook chapters, or any educational content"
    )

    document_text = ""
    filename = "content.pdf"

    if uploaded_file:
        with st.spinner("πŸ“– Reading your document..."):
            document_text, error = process_uploaded_file(uploaded_file)
            if error:
                st.error(f"❌ {error}")
            else:
                st.success("βœ… Document processed successfully!")
                filename = uploaded_file.name

                # Show document preview (first 1000 chars only, to keep the UI light)
                with st.expander("πŸ“‹ Document Preview", expanded=False):
                    st.text_area("Extracted Text", document_text[:1000] + "..." if len(document_text) > 1000 else document_text, height=200, key="doc_preview")

    st.subheader("🎯 Transformation Instructions")

    topic = st.text_input(
        "What topic is this document about?",
        placeholder="e.g., Neural Networks, French Revolution, Calculus Basics",
        help="Briefly describe the main topic of the document"
    )

    content_type = st.selectbox(
        "What would you like me to create from this document?",
        ["Lecture Notes", "Study Guide", "Interactive Activity", "Lesson Plan", "Comprehensive Explanation", "Assessment Questions"],
        help="Choose the format you want me to generate based on your document"
    )

    student_level = st.selectbox(
        "What level should the content be adapted for?",
        ["Elementary School", "Middle School", "High School", "Undergraduate", "Graduate", "Professional Development"],
        help="Select the target student level for the generated content"
    )

    additional_req = st.text_area(
        "Any specific transformation requirements?",
        placeholder="e.g., 'Make it more interactive', 'Simplify the language', 'Add real-world examples', 'Focus on key concepts'",
        help="Specify how you want the content transformed"
    )

    # For document-based generation, objectives are optional since they can be extracted
    objectives = st.text_area(
        "Learning Objectives (Optional)",
        placeholder="e.g., 'Students should understand X, apply Y, analyze Z'",
        help="Optional: Specify what students should learn. If empty, I'll infer from the document."
    )

    return topic, objectives, student_level, content_type, additional_req, document_text, filename
99
+
100
def get_tutor_inputs_manual():
    """Get all inputs from tutor (original manual method).

    Returns:
        tuple: (topic, objectives, student_level, content_type,
        additional_req) as entered in the form widgets.
    """
    st.subheader("πŸ“– Teaching Topic")
    topic = st.text_area(
        "What topic do you want to teach?",
        placeholder="e.g., Neural Networks, Quantum Computing, Calculus Fundamentals, French Revolution",
        help="Be specific about the topic or concept",
        key="tutor_topic_input"
    )

    st.subheader("🎯 Learning Objectives")
    objectives = st.text_area(
        "What should students learn from this?",
        placeholder="e.g., 'Understand backpropagation', 'Apply gradient descent', 'Explain the causes of WWI'",
        help="List the key learning outcomes for students",
        key="tutor_objectives_input"
    )

    st.subheader("πŸ‘₯ Student Level")
    student_level = st.selectbox(
        "What level are your students?",
        ["Elementary School", "Middle School", "High School", "Undergraduate", "Graduate", "Professional Development"],
        help="Select the appropriate level for your students",
        key="tutor_student_level"
    )

    st.subheader("πŸ“ Content Format")
    content_type = st.selectbox(
        "What type of content do you need?",
        ["Lesson Plan", "Study Guide", "Lecture Notes", "Interactive Activity", "Comprehensive Explanation"],
        help="Choose the format that best suits your teaching needs",
        key="tutor_content_type_select"
    )

    st.subheader("⚑ Additional Requirements (Optional)")
    additional_req = st.text_area(
        "Any specific requirements?",
        placeholder="e.g., 'Include code examples', 'Use historical primary sources', 'Focus on practical applications'",
        help="Specify any particular focus or requirements",
        key="tutor_additional_req"
    )

    return topic, objectives, student_level, content_type, additional_req
143
+
144
def generate_tutor_content_from_document(topic, objectives, student_level, content_type, additional_req, document_text, filename):
    """Generate content for tutor based on uploaded document.

    Preserves the inputs in session state for later regeneration, builds a
    model-specific transformation prompt, generates, validates, exports a
    PDF and persists the result before rerunning the app.

    Args:
        topic: Main topic of the uploaded document.
        objectives: Optional learning objectives (may be empty).
        student_level: Target student level string.
        content_type: Desired output format (e.g. "Lecture Notes").
        additional_req: Optional transformation requirements.
        document_text: Extracted text of the uploaded document.
        filename: Original upload filename.
    """
    # STORE ORIGINAL INPUTS FOR REGENERATION
    st.session_state.original_topic = topic
    st.session_state.original_objectives = objectives
    st.session_state.original_additional_req = additional_req
    st.session_state.original_document_text = document_text
    st.session_state.original_filename = filename

    with st.spinner("πŸ“ Transforming your document into new content..."):
        selected_model = st.session_state.get("selected_model", "groq")

        # Build document-based prompt
        if selected_model == "phi3":
            prompt = build_phi3_document_prompt(topic, objectives, student_level, content_type, additional_req, document_text)
        else:
            prompt = build_groq_document_prompt(topic, objectives, student_level, content_type, additional_req, document_text)

        try:
            output = model_manager.generate(
                prompt,
                selected_model,
                user_type="tutor",
                student_level=student_level,
                content_type=content_type
            )

            # FIX: the student flow guards against a None response; this path
            # previously crashed with a TypeError on `msg in output` when the
            # service returned None.
            if output is None:
                st.error("❌ AI service returned no response")
                return

            # Check if it's an error message (errors come back in-band)
            if any(msg in output for msg in ["🚫", "πŸ“Š", "❌", "[Error", "[RateLimit]", "[Quota]", "[Auth]", "[Empty]", "❌ Phi-3 Error:"]):
                st.error(output)
                return

            # Generate PDF
            pdf_data = generate_pdf(
                output,
                "tutor",
                level=student_level,
                topic=topic,
                content_type=f"{content_type} from Document",
                objectives=objectives or "Derived from uploaded document"
            )

            # Update session state
            update_session_state(
                original_prompt=prompt,
                generated_output=output,
                feedback_given=False,
                regenerated=False,
                content_source="tutor_document",
                student_level=student_level,
                tutor_topic=topic,
                tutor_content_type=content_type,
                pdf_export_data=pdf_data,
                saved_to_history=False,
                current_history_id=None,
                generated_model=selected_model
            )

            st.rerun()

        except Exception as e:
            st.error(f"❌ Generation failed: {str(e)}")
206
+
207
def build_phi3_document_prompt(topic, objectives, student_level, content_type, additional_req, document_text):
    """Build Phi-3 prompt for document-based content generation.

    Args:
        topic: Main topic of the document.
        objectives: Optional learning objectives (may be empty).
        student_level: Target student level string.
        content_type: Desired output format; mapped to a human description.
        additional_req: Optional transformation requirements.
        document_text: Extracted text of the uploaded document.

    Returns:
        str: The assembled prompt.
    """
    # Map the UI content-type option to a prose description for the prompt.
    content_descriptions = {
        "Lecture Notes": "detailed lecture notes suitable for classroom teaching",
        "Study Guide": "comprehensive study guide for student self-study",
        "Interactive Activity": "engaging interactive learning activities for students",
        "Lesson Plan": "structured lesson plan with timing and activities",
        "Comprehensive Explanation": "thorough explanatory document",
        "Assessment Questions": "quiz questions, exercises, or assessment materials"
    }

    description = content_descriptions.get(content_type, "educational content")

    prompt = f"""Transform the provided educational document into {description}.

ORIGINAL DOCUMENT CONTENT:
{document_text}

TOPIC: {topic}
"""
    if objectives:
        prompt += f"\nLEARNING OBJECTIVES: {objectives}"

    prompt += f"""
TARGET AUDIENCE: {student_level} students
OUTPUT FORMAT: {content_type}
"""
    if additional_req:
        prompt += f"\nSPECIFIC REQUIREMENTS: {additional_req}"

    prompt += f"""

TRANSFORMATION INSTRUCTIONS:
- Create {content_type.lower()} based on the original document content
- Adapt the material for {student_level} students
- Maintain the core educational concepts but reformat for the new purpose
- Use appropriate language and examples for the target level
- Structure the content effectively for {content_type.lower()}

FORBIDDEN:
- Do not simply copy the original content
- Do not use phrases like "based on the document" or "the original content says"
- Do not refer to yourself as an AI or assistant
- Do not include meta-commentary about the transformation process

BEGIN {content_type.upper()}:

"""
    return prompt
257
+
258
def build_groq_document_prompt(topic, objectives, student_level, content_type, additional_req, document_text):
    """Build Groq prompt for document-based content generation.

    Lighter instruction set than the Phi-3 variant; same inputs.

    Args:
        topic: Main topic of the document.
        objectives: Optional learning objectives (may be empty).
        student_level: Target student level string.
        content_type: Desired output format.
        additional_req: Optional transformation requirements.
        document_text: Extracted text of the uploaded document.

    Returns:
        str: The assembled prompt.
    """
    prompt = f"""Create {content_type.lower()} based on the following document:

DOCUMENT CONTENT:
{document_text}

TOPIC: {topic}
"""
    if objectives:
        prompt += f"\nLEARNING OBJECTIVES: {objectives}"

    prompt += f"""
STUDENT LEVEL: {student_level}
CONTENT TYPE: {content_type}
"""
    if additional_req:
        prompt += f"\nADDITIONAL REQUIREMENTS: {additional_req}"

    prompt += f"""

Transform the document content into effective {content_type.lower()} suitable for {student_level} students.
"""
    return prompt
283
+
284
def handle_tutor_regeneration():
    """Handle tutor content regeneration with new model.

    Re-runs generation with the inputs preserved in session state by the
    original generation call. Branches on whether the original run was
    document-based (``content_source == "tutor_document"``) or manual.
    """
    st.header("πŸ‘¨β€πŸ« Tutor Content Creator")
    render_tutor_info()

    # Show we're regenerating
    st.info("πŸ”„ Regenerating your content with the new model...")

    # Check if this was document-based content
    if st.session_state.get("content_source") == "tutor_document" and st.session_state.get("original_document_text"):
        # Document-based regeneration
        topic = st.session_state.original_topic
        objectives = st.session_state.original_objectives
        student_level = st.session_state.student_level
        content_type = st.session_state.tutor_content_type
        additional_req = st.session_state.get("original_additional_req", "")
        document_text = st.session_state.original_document_text
        filename = st.session_state.get("original_filename", "content.pdf")

        # Show original inputs for context
        with st.expander("πŸ“‹ Original Inputs (Read-only)", expanded=True):
            st.write(f"**Source Document:** (unknown)")
            st.write(f"**Topic:** {topic}")
            st.write(f"**Student Level:** {student_level}")
            st.write(f"**Content Type:** {content_type}")
            if objectives:
                st.write(f"**Learning Objectives:** {objectives}")
            if additional_req:
                st.write(f"**Transformation Requirements:** {additional_req}")

        # Regenerate from document
        generate_tutor_content_from_document(topic, objectives, student_level, content_type, additional_req, document_text, filename)
    else:
        # Manual input regeneration (existing code)
        topic = st.session_state.original_topic
        objectives = st.session_state.original_objectives
        student_level = st.session_state.student_level
        content_type = st.session_state.tutor_content_type
        additional_req = st.session_state.get("original_additional_req", "")

        # Show original inputs for context
        with st.expander("πŸ“‹ Original Inputs (Read-only)", expanded=True):
            st.write(f"**Topic:** {topic}")
            st.write(f"**Student Level:** {student_level}")
            st.write(f"**Content Type:** {content_type}")
            st.write(f"**Learning Objectives:** {objectives}")
            if additional_req:
                st.write(f"**Additional Requirements:** {additional_req}")

        # Regenerate the content
        generate_tutor_content(topic, objectives, student_level, content_type, additional_req)
335
+
336
def render_tutor_info():
    """Show the "how this works" banner explaining the two tutor input modes."""
    banner = """
    **How this works:**
    - **Describe Topic & Objectives**: Tell me what you want to teach, and I'll generate educational content from scratch
    - **Upload Document**: Upload existing materials (lesson plans, textbooks, etc.) and I'll transform them into new formats like lecture notes, study guides, or interactive activities

    **Perfect for**: Converting lesson plans to lecture notes, textbook chapters to study guides, curriculum materials to interactive activities
    """
    st.info(banner)
345
+
346
def generate_tutor_content(topic, objectives, student_level, content_type, additional_req):
    """Route a tutor request to the right generation strategy based on input size.

    Also persists the raw inputs to session state so that a later model switch
    can regenerate the same content.
    """
    st.session_state.original_topic = topic
    st.session_state.original_objectives = objectives
    st.session_state.original_additional_req = additional_req

    request_size = len(topic) + len(objectives) + len(additional_req)

    # Largest requests are split per-objective; mid-size go through the
    # high-capacity single pass; everything else uses the normal path.
    if request_size > 15000:
        st.info("πŸ“ Creating comprehensive content with detailed sections...")
        generate_chunked_tutor_content(topic, objectives, student_level, content_type, additional_req)
    elif request_size > 8000:
        st.info("πŸ“ Processing your comprehensive teaching content...")
        generate_single_large_tutor_content(topic, objectives, student_level, content_type, additional_req)
    else:
        generate_single_tutor_content(topic, objectives, student_level, content_type, additional_req)
363
+
364
def generate_single_tutor_content(topic, objectives, student_level, content_type, additional_req):
    """Generate tutor content for a normal-sized request and store the result.

    Builds the model-specific prompt, calls the model, converts the output to a
    PDF, and records everything in session state before triggering a rerun.
    """
    with st.spinner("✍️ Generating educational content..."):
        model_key = st.session_state.get("selected_model", "groq")

        # Phi-3 has its own prompt format; everything else uses the Groq format.
        builder = build_phi3_tutor_prompt if model_key == "phi3" else build_groq_tutor_prompt
        prompt = builder(topic, objectives, student_level, content_type, additional_req)

        # Generators report failures in-band via these marker substrings.
        error_markers = ("🚫", "πŸ“Š", "❌", "[Error", "[RateLimit]", "[Quota]", "[Auth]", "[Empty]", "❌ Phi-3 Error:")

        try:
            output = model_manager.generate(
                prompt,
                model_key,
                user_type="tutor",
                student_level=student_level,
                content_type=content_type,
            )

            # Surface in-band errors instead of persisting them as content.
            if any(marker in output for marker in error_markers):
                st.error(output)
                return

            pdf_data = generate_pdf(
                output,
                "tutor",
                level=student_level,
                topic=topic,
                content_type=content_type,
                objectives=objectives,
            )

            update_session_state(
                original_prompt=prompt,
                generated_output=output,
                feedback_given=False,
                regenerated=False,
                content_source="tutor",
                student_level=student_level,
                tutor_topic=topic,
                tutor_content_type=content_type,
                pdf_export_data=pdf_data,
                saved_to_history=False,
                current_history_id=None,
                generated_model=model_key,
            )

            st.rerun()

        except Exception as e:
            st.error(f"❌ Generation failed: {str(e)}")
419
+
420
def generate_single_large_tutor_content(topic, objectives, student_level, content_type, additional_req):
    """Generate tutor content for a large request in one high-capacity pass.

    Phi-3 goes through the standard generate path; Groq is routed through its
    dedicated large-content generator.
    """
    with st.spinner("✍️ Generating comprehensive educational content..."):
        model_key = st.session_state.get("selected_model", "groq")

        if model_key == "phi3":
            prompt = build_phi3_tutor_prompt(topic, objectives, student_level, content_type, additional_req)
        else:
            prompt = build_groq_tutor_prompt(topic, objectives, student_level, content_type, additional_req)

        # Generators report failures in-band via these marker substrings.
        error_markers = ("🚫", "πŸ“Š", "❌", "[Error", "[RateLimit]", "[Quota]", "[Auth]", "[Empty]", "❌ Phi-3 Error:")

        try:
            if model_key == "phi3":
                output = model_manager.generate(
                    prompt,
                    model_key,
                    user_type="tutor",
                    student_level=student_level,
                    content_type=content_type,
                )
            else:
                # Groq exposes a dedicated path for oversized prompts.
                output = model_manager.groq_generator.generate_large_content(prompt)

            if any(marker in output for marker in error_markers):
                st.error(output)
                return

            pdf_data = generate_pdf(
                output,
                "tutor",
                level=student_level,
                topic=topic,
                content_type=content_type,
                objectives=objectives,
            )

            update_session_state(
                original_prompt=prompt,
                generated_output=output,
                feedback_given=False,
                regenerated=False,
                content_source="tutor",
                student_level=student_level,
                tutor_topic=topic,
                tutor_content_type=content_type,
                pdf_export_data=pdf_data,
                saved_to_history=False,
                current_history_id=None,
                generated_model=model_key,
            )

            st.rerun()

        except Exception as e:
            st.error(f"❌ Generation failed: {str(e)}")
477
+
478
def generate_chunked_tutor_content(topic, objectives, student_level, content_type, additional_req):
    """Generate content objective-by-objective, then stitch the parts together.

    Splits the objectives into chunks, generates each with progress feedback,
    and combines the sections into a single document saved to session state.
    """
    objective_chunks = chunk_objectives(objectives, max_chunk_size=4000)

    # Nothing to chunk: fall back to a single high-capacity generation.
    if not objective_chunks:
        generate_single_large_tutor_content(topic, objectives, student_level, content_type, additional_req)
        return

    sections = []
    progress_bar = st.progress(0)
    status_text = st.empty()

    model_key = st.session_state.get("selected_model", "groq")
    total = len(objective_chunks)
    error_markers = ("🚫", "πŸ“Š", "❌", "[Error", "[RateLimit]", "[Quota]", "[Auth]", "[Empty]", "❌ Phi-3 Error:")

    for index, chunk in enumerate(objective_chunks, start=1):
        status_text.text(f"πŸ“– Creating content for learning objective {index}/{total}...")
        # Progress reflects completed chunks (0-based fraction, as before).
        progress_bar.progress((index - 1) / total)

        if model_key == "phi3":
            prompt = build_phi3_tutor_chunk_prompt(topic, chunk, student_level, content_type, additional_req, index, total)
        else:
            prompt = build_groq_tutor_chunk_prompt(topic, chunk, student_level, content_type, additional_req, index, total)

        try:
            output = model_manager.generate(
                prompt,
                model_key,
                user_type="tutor",
                student_level=student_level,
                content_type=content_type,
            )

            if any(marker in output for marker in error_markers):
                st.error(f"❌ Failed to process objective {index}: {output}")
                return

            sections.append(output)

        except Exception as e:
            st.error(f"❌ Failed to process objective {index}: {str(e)}")
            return

    progress_bar.progress(1.0)
    status_text.text("βœ… All sections processed! Combining results...")

    final_output = combine_tutor_outputs(sections, topic, student_level, content_type)

    pdf_data = generate_pdf(
        final_output,
        "tutor",
        level=student_level,
        topic=topic,
        content_type=content_type,
        objectives=objectives,
    )

    update_session_state(
        original_prompt=f"{content_type} for {topic} - {student_level} level",
        generated_output=final_output,
        feedback_given=False,
        regenerated=False,
        content_source="tutor",
        student_level=student_level,
        tutor_topic=topic,
        tutor_content_type=content_type,
        pdf_export_data=pdf_data,
        saved_to_history=False,
        current_history_id=None,
        generated_model=model_key,
    )

    status_text.text("βœ… Content generation complete!")
    st.rerun()
555
+
556
def chunk_objectives(objectives, max_chunk_size=4000):
    """Split a free-form objectives string into bullet-list chunks.

    Items are separated on newlines, bullet characters, and hyphens, then
    packed into chunks of at most ``max_chunk_size`` characters. If more than
    five chunks result, packing is retried with a larger budget (up to 6000).

    Returns:
        A list of "β€’ item"-formatted chunk strings (empty for empty input).
    """
    # NOTE(review): splitting on '-' also breaks hyphenated words — confirm intended.
    items = [piece.strip() for piece in re.split(r'[\nβ€’\-]', objectives) if piece.strip()]

    chunks = []
    current = ""
    for item in items:
        if len(current) + len(item) < max_chunk_size:
            current = f"{current}\nβ€’ {item}" if current else f"β€’ {item}"
        else:
            if current:
                chunks.append(current)
            current = f"β€’ {item}"

    if current:
        chunks.append(current)

    # Too many small chunks: grow the budget and repack.
    if len(chunks) > 5 and max_chunk_size < 6000:
        return chunk_objectives(objectives, max_chunk_size + 1000)

    return chunks
582
+
583
def build_phi3_tutor_chunk_prompt(topic, objective_chunk, student_level, content_type, additional_req, chunk_num, total_chunks):
    """Compose the Phi-3 prompt for one objective chunk.

    ``chunk_num`` and ``total_chunks`` are accepted for signature parity with
    the Groq chunk builder but are not referenced in the prompt text.
    """
    segments = [f"""You are creating educational content. Write the actual content that teaches these concepts.

TOPIC: {topic}

CONCEPTS TO EXPLAIN:
{objective_chunk}

AUDIENCE: {student_level} students
FORMAT: {content_type}
"""]
    if additional_req:
        segments.append(f"\nSPECIFIC FOCUS: {additional_req}")

    segments.append(f"""

CONTENT REQUIREMENTS:
- Write the actual educational material, not a lesson plan
- Explain concepts clearly with examples
- Use appropriate language for {student_level}
- Include definitions and key information
- Structure the content for learning

FORBIDDEN: Do not include timing, activities, assessments, or teaching instructions.
FORBIDDEN: Do not use phrases like "students will learn" or "this section will cover".

BEGIN CONTENT:

""")
    return "".join(segments)
614
+
615
def build_groq_tutor_chunk_prompt(topic, objective_chunk, student_level, content_type, additional_req, chunk_num, total_chunks):
    """Compose the Groq prompt for one objective chunk.

    The chunk counters are kept only for signature parity; they do not appear
    in the prompt text.
    """
    pieces = [f"""Create educational content for the following:

TOPIC: {topic}

LEARNING OBJECTIVES:
{objective_chunk}

STUDENT LEVEL: {student_level}
CONTENT TYPE: {content_type}
"""]
    if additional_req:
        pieces.append(f"\nADDITIONAL REQUIREMENTS: {additional_req}")

    pieces.append(f"""

Generate comprehensive educational content that directly teaches these concepts.
""")
    return "".join(pieces)
635
+
636
def combine_tutor_outputs(outputs, topic, student_level, content_type):
    """Merge per-objective outputs into one markdown document with part headers.

    Strips common meta/instructional lead-ins ("Here is...", "I will...") that
    models sometimes emit, then joins the cleaned sections under "## Part N"
    headings separated by horizontal rules.
    """
    instructional = re.compile(
        r'(?:Here is|I will|This section|Students will|We will).*?(?=\n\n|\n#|\n##|$)',
        flags=re.IGNORECASE | re.DOTALL,
    )

    pieces = [f"# {content_type}: {topic}\n\n", f"**Target Level:** {student_level}\n\n"]
    for number, raw in enumerate(outputs, start=1):
        cleaned = instructional.sub('', raw)
        pieces.append(f"## Part {number}\n\n{cleaned.strip()}\n\n---\n\n")

    return "".join(pieces)
648
+
649
def build_phi3_tutor_prompt(topic, objectives, student_level, content_type, additional_req):
    """Build the Phi-3 prompt for full tutor content generation.

    Each supported content type carries its own description, requirement list,
    and forbidden list; unknown content types fall back to the
    "Comprehensive Explanation" specification.
    """
    content_requirements = {
        "Lesson Plan": {
            "description": "Create a structured lesson plan with timing, activities, and assessments",
            "requirements": [
                "Include learning objectives and outcomes",
                "Provide a timed lesson structure with activities",
                "Include teaching methods and student activities",
                "Add assessment methods and homework if applicable",
                "Use appropriate pedagogical approaches"
            ],
            "forbidden": []
        },
        "Study Guide": {
            "description": "Create a comprehensive study guide for students",
            "requirements": [
                "Include key concepts and definitions",
                "Provide summaries and review questions",
                "Add practice problems or exercises",
                "Include study tips and strategies",
                "Structure for easy review and self-testing"
            ],
            "forbidden": [
                "Do not include timing or classroom management instructions"
            ]
        },
        "Lecture Notes": {
            "description": "Create detailed lecture notes for teaching",
            "requirements": [
                "Include comprehensive explanations",
                "Provide examples and case studies",
                "Add key points and summaries",
                "Include relevant diagrams or frameworks if needed",
                "Structure for clear presentation delivery"
            ],
            "forbidden": [
                "Do not include student activities or assessments"
            ]
        },
        "Interactive Activity": {
            "description": "Create engaging interactive learning activities",
            "requirements": [
                "Design hands-on or group activities",
                "Include clear instructions for students",
                "Provide learning objectives for each activity",
                "Add discussion questions or prompts",
                "Include facilitation guidelines if needed"
            ],
            "forbidden": [
                "Do not include lengthy theoretical explanations"
            ]
        },
        "Comprehensive Explanation": {
            "description": "Create a thorough explanatory document",
            "requirements": [
                "Provide in-depth conceptual explanations",
                "Use analogies and real-world examples",
                "Include step-by-step breakdowns of complex ideas",
                "Add visual descriptions or mental models",
                "Structure from basic to advanced concepts"
            ],
            "forbidden": [
                "Do not include activities, assessments, or teaching instructions"
            ]
        }
    }

    # Unknown content types get the most general specification.
    spec = content_requirements.get(content_type, content_requirements["Comprehensive Explanation"])

    parts = [f"""You are creating educational content for {content_type.lower()}. Write the actual content.

TOPIC: {topic}

LEARNING GOALS:
{objectives}

AUDIENCE: {student_level} students
CONTENT TYPE: {content_type} - {spec['description']}
"""]
    if additional_req:
        parts.append(f"\nSPECIFIC FOCUS: {additional_req}")

    parts.append(f"""

CONTENT REQUIREMENTS:
- Write the actual educational material
- Explain concepts clearly with examples appropriate for {student_level}
- Use appropriate language and terminology for the audience
- Structure the content logically for learning
""")

    # Content-type-specific requirements.
    parts.extend(f"- {requirement}\n" for requirement in spec['requirements'])

    # Content-type-specific prohibitions, or a generic pair when none exist.
    if spec['forbidden']:
        parts.append("\nFORBIDDEN:\n")
        parts.extend(f"- {item}\n" for item in spec['forbidden'])
    else:
        parts.append("\nFORBIDDEN:\n- Do not use generic phrases like 'this section will cover'\n- Do not create content that is not directly educational\n")

    # Prohibitions that apply regardless of content type.
    universal_forbidden = (
        "Do not use phrases like 'students will learn' or 'this teaches'",
        "Do not refer to yourself as an AI or assistant",
        "Do not add meta-commentary about the content",
    )
    parts.extend(f"- {item}\n" for item in universal_forbidden)

    parts.append(f"""

BEGIN {content_type.upper()} CONTENT:

""")
    return "".join(parts)
773
+
774
def build_groq_tutor_prompt(topic, objectives, student_level, content_type, additional_req):
    """Build the Groq prompt for full tutor content generation.

    Looks up a human-readable description of the requested content type
    (falling back to "educational content" for unknown types) and assembles
    the request sections in order.
    """
    content_descriptions = {
        "Lesson Plan": "structured lesson plan with timing, activities, and assessments",
        "Study Guide": "comprehensive study guide with key concepts and practice questions",
        "Lecture Notes": "detailed lecture notes for teaching delivery",
        "Interactive Activity": "engaging interactive learning activities",
        "Comprehensive Explanation": "thorough explanatory document",
    }
    description = content_descriptions.get(content_type, "educational content")

    sections = [f"""Create {description} for teaching:

TOPIC: {topic}

LEARNING OBJECTIVES:
{objectives}

STUDENT LEVEL: {student_level}
CONTENT TYPE: {content_type}
"""]
    if additional_req:
        sections.append(f"\nADDITIONAL REQUIREMENTS: {additional_req}")

    sections.append(f"""

Generate detailed {content_type.lower()} that achieves the learning objectives.
""")
    return "".join(sections)
components/ui_components.py ADDED
@@ -0,0 +1,313 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from db.helpers import get_research_stats
3
+ from generator import model_manager
4
+ from export_training_data_from_db import export_training_data_from_db
5
+
6
def render_header():
    """Render the application's main title bar."""
    app_title = "🧠 TailorED - AI-Powered Educational Content Generator"
    st.title(app_title)
8
+
9
def render_sidebar():
    """Render the research sidebar: model picker, mission blurb, and stats.

    A model switch while generated content exists does not take effect
    immediately; it is staged in session state and confirmed through
    `render_regeneration_prompt`.
    """
    with st.sidebar:
        # === RESEARCH MODEL SELECTION ===
        st.subheader("πŸ”¬ Research Model Selection")

        # Default to Groq on first load.
        if "selected_model" not in st.session_state:
            st.session_state.selected_model = "groq"

        phi3_label = "πŸ§ͺ Phi-3 (Research Model)"
        groq_label = "πŸ“Š Groq (Training Data)"
        model_options = [phi3_label, groq_label]

        has_content = st.session_state.get("generated_output")
        on_generator_page = st.session_state.get("current_page") == "generator"
        prompt_showing = st.session_state.get("showing_regeneration_prompt", False)

        if has_content and on_generator_page and not prompt_showing:
            # Content exists: a model change must go through the
            # regeneration confirmation instead of applying silently.
            previous_model = st.session_state.selected_model

            choice = st.radio(
                "Select AI Model:",
                options=model_options,
                index=0 if st.session_state.selected_model == "phi3" else 1,
                key="research_model_selection",
            )
            new_model = "phi3" if choice == phi3_label else "groq"

            if new_model != previous_model:
                st.session_state.pending_model_switch = new_model
                st.session_state.previous_model = previous_model
                st.session_state.showing_regeneration_prompt = True
                st.rerun()
        else:
            # No content (or prompt already shown): switch immediately.
            choice = st.radio(
                "Select AI Model:",
                options=model_options,
                index=0 if st.session_state.selected_model == "phi3" else 1,
                key="research_model_selection",
            )
            new_model = "phi3" if choice == phi3_label else "groq"
            if new_model != st.session_state.selected_model:
                st.session_state.selected_model = new_model
                if not st.session_state.get("generated_output"):
                    st.success(f"βœ… Switched to {new_model.upper()} model")

        # Current-model status banner.
        if st.session_state.selected_model == "phi3":
            st.info("πŸ§ͺ **Testing Phi-3** - Research model being evaluated")
        else:
            st.success("πŸ“Š **Generating Training Data** - Groq outputs will train Phi-3")

        # Confirmation dialog for a staged model switch.
        if st.session_state.get("showing_regeneration_prompt", False):
            render_regeneration_prompt()

        # Research context
        st.markdown("---")
        st.markdown("### 🎯 Research Mission")
        st.markdown("""
        We're **fine-tuning Phi-3 Mini** using Groq's high-quality outputs.

        **Your Role:** Compare both models to help improve Phi-3!
        - Use **Groq** to create training examples
        - Use **Phi-3** to test research progress
        - Switch models to compare outputs on the same content
        """)

        st.markdown("---")

        st.header("πŸŽ“ Research Progress")
        st.write("**Your feedback trains better educational AI**")

        try:
            stats = get_research_stats()
            render_progress_metrics(stats)
            render_quality_indicators(stats)
            render_research_status(stats)
            render_service_status()
        except Exception as e:
            st.error(f"Sidebar failed: {e}")
            render_default_sidebar()
103
+
104
def render_regeneration_prompt():
    """Ask the user whether to regenerate content after a model switch.

    Offers three choices: regenerate with the new model, revert to the old
    model keeping current content, or clear everything and go home.
    """
    st.markdown("---")
    st.warning("πŸ”„ **Model Changed!**")

    old_model = st.session_state.previous_model
    new_model = st.session_state.pending_model_switch

    st.write(f"You switched from **{old_model.upper()}** to **{new_model.upper()}**.")
    st.write("Would you like to regenerate the same content with the new model?")

    def _clear_switch_state():
        # Tear down the staged-switch bookkeeping regardless of the choice made.
        st.session_state.showing_regeneration_prompt = False
        st.session_state.pending_model_switch = None
        st.session_state.previous_model = None

    regen_col, keep_col, home_col = st.columns([1, 1, 1])

    with regen_col:
        if st.button("βœ… Yes, Regenerate", use_container_width=True, key="confirm_regenerate"):
            # Apply the new model and flag regeneration for the next run.
            st.session_state.selected_model = new_model
            st.session_state.regenerate_with_new_model = True
            _clear_switch_state()
            st.rerun()

    with keep_col:
        if st.button("❌ No, Keep Current", use_container_width=True, key="keep_current"):
            # Revert to the previous model, keeping the current content.
            st.session_state.selected_model = old_model
            _clear_switch_state()
            st.rerun()

    with home_col:
        if st.button("🏠 Go to Home", use_container_width=True, key="go_home"):
            # Discard the session content entirely and return home.
            from components.session_manager import clear_session
            clear_session()
            _clear_switch_state()
            st.rerun()
145
+
146
def render_progress_metrics(stats):
    """Show headline feedback/content counters, plus the goal bar when feedback exists."""
    left_col, right_col = st.columns(2)

    with left_col:
        st.metric("Total Feedback", stats.get("total_feedback", 0))
    with right_col:
        st.metric("Content Generated", stats.get("total_content", 0))

    if stats.get("total_feedback", 0) > 0:
        render_progress_bar(stats)
157
+
158
def render_progress_bar(stats):
    """Render progress toward the 100-feedback research goal, celebrating at 100%."""
    st.subheader("πŸ“ˆ Our Progress")
    goal = 100
    collected = stats.get("total_feedback", 0)
    # Cap at 100% so the bar never overflows past the goal.
    percent = min((collected / goal) * 100, 100)
    st.progress(percent / 100)
    st.caption(f"Goal: 100 feedback points β€’ {collected}/100")

    if collected >= goal:
        st.balloons()
        st.success("πŸŽ‰ Amazing! We've reached our research goal!")
170
+
171
def render_quality_indicators(stats):
    """Compare average clarity/depth scores between Groq and Phi-3.

    Deltas are shown only when both models have a non-zero score for the
    metric. FIX: deltas and the gap caption now use the signed ``:+.1f``
    format — the previous ``f"+{diff:.1f}"`` rendered negative differences
    as "+-0.3" (the guards only check that both scores exist, not that the
    difference is positive).
    """
    st.subheader("✨ Model Quality Comparison")

    # Safely get model scores with fallbacks.
    groq_scores = stats.get("groq_scores", {})
    phi3_scores = stats.get("phi3_scores", {})

    groq_clarity = groq_scores.get("clarity", 0)
    groq_depth = groq_scores.get("depth", 0)
    phi3_clarity = phi3_scores.get("clarity", 0)
    phi3_depth = phi3_scores.get("depth", 0)

    # Groq metrics
    st.markdown("**πŸ“Š Groq (Training Data)**")
    col1, col2 = st.columns(2)
    with col1:
        delta_clarity = None
        if groq_clarity > 0 and phi3_clarity > 0:
            delta_clarity = f"{groq_clarity - phi3_clarity:+.1f}"
        st.metric("Avg Clarity", f"{groq_clarity}/5", delta=delta_clarity)
    with col2:
        delta_depth = None
        if groq_depth > 0 and phi3_depth > 0:
            delta_depth = f"{groq_depth - phi3_depth:+.1f}"
        st.metric("Avg Depth", f"{groq_depth}/5", delta=delta_depth)

    # Phi-3 metrics
    st.markdown("**πŸ§ͺ Phi-3 (Research Model)**")
    col3, col4 = st.columns(2)
    with col3:
        delta_clarity_phi3 = None
        if phi3_clarity > 0 and groq_clarity > 0:
            delta_clarity_phi3 = f"{phi3_clarity - groq_clarity:+.1f}"
        st.metric("Avg Clarity", f"{phi3_clarity}/5", delta=delta_clarity_phi3)
    with col4:
        delta_depth_phi3 = None
        if phi3_depth > 0 and groq_depth > 0:
            delta_depth_phi3 = f"{phi3_depth - groq_depth:+.1f}"
        st.metric("Avg Depth", f"{phi3_depth}/5", delta=delta_depth_phi3)

    # Quality gap analysis (only when both models have clarity scores).
    if groq_clarity > 0 and phi3_clarity > 0:
        clarity_gap = groq_clarity - phi3_clarity
        depth_gap = groq_depth - phi3_depth

        if clarity_gap > 0 or depth_gap > 0:
            # Signed format so a mixed-sign pair (e.g. +0.2 / -0.1) reads correctly.
            st.caption(f"πŸ” Quality gap: Clarity {clarity_gap:+.1f}, Depth {depth_gap:+.1f}")
        elif clarity_gap < 0 or depth_gap < 0:
            st.caption(f"πŸŽ‰ Phi-3 leads: Clarity {abs(clarity_gap):.1f}, Depth {abs(depth_gap):.1f}")
        else:
            st.caption("βš–οΈ Models performing equally")
223
+
224
def render_research_status(stats):
    """Show data-collection counters and fine-tuning readiness (goal: 50 examples)."""
    st.subheader("πŸ”¬ Research Progress")

    col1, col2, col3 = st.columns(3)

    with col1:
        st.metric("Groq Data", stats.get("groq_feedback_count", 0))
        st.caption("For fine-tuning")
    with col2:
        st.metric("High-Quality Groq", stats.get("high_quality_groq", 0))
        st.caption("Fine-tuning ready")
    with col3:
        st.metric("Phi-3 Data", stats.get("phi3_feedback_count", 0))
        st.caption("For comparison")

    # Fine-tuning readiness
    goal = 50
    ready_examples = stats.get("high_quality_groq", 0)

    if ready_examples >= goal:
        st.success("πŸŽ‰ Ready to fine-tune Phi-3 with Groq data!")
        if st.button("πŸš€ Export Fine-tuning Data", use_container_width=True, type="primary"):
            from export_training_data_from_db import export_training_data_from_db
            if export_training_data_from_db():
                st.success("βœ… Groq data exported for Phi-3 fine-tuning!")
            else:
                st.error("Export failed")
    else:
        remaining = goal - ready_examples
        st.info(f"πŸ“Š Need {remaining} more high-quality Groq examples")
        st.progress(ready_examples / goal if goal > 0 else 0)
        st.caption(f"Progress: {ready_examples}/{goal} examples")
259
+
260
def render_service_status():
    """Display health of the Phi-3 server and the Groq API providers.

    Any exception from the status probe collapses to a generic failure notice.
    """
    st.markdown("---")
    st.subheader("πŸ›œ Platform Status")

    try:
        status = model_manager.get_service_status()
        phi3_info = status["phi3"]
        groq_info = status["groq"]

        left_col, right_col = st.columns(2)

        with left_col:
            # Phi-3: healthy server + loaded model = ready; healthy server
            # alone means setup is still needed.
            if phi3_info["server_healthy"] and phi3_info["model_available"]:
                st.success("πŸ§ͺ Phi-3 Mini")
                st.caption("Research Model β€’ Ready")
            elif phi3_info["server_healthy"]:
                st.warning("πŸ§ͺ Phi-3 Mini")
                st.caption("Research Model β€’ Needs Setup")
            else:
                st.error("πŸ§ͺ Phi-3 Mini")
                st.caption("Research Model β€’ Offline")

        with right_col:
            healthy = groq_info['healthy_providers']
            total = groq_info['total_providers']

            if healthy == total:
                st.success("πŸ“Š Groq API")
                st.caption("Training Data β€’ Fully Operational")
            elif healthy > 0:
                st.warning("πŸ“Š Groq API")
                st.caption(f"Training Data β€’ {healthy}/{total} providers")
            else:
                st.error("πŸ“Š Groq API")
                st.caption("Training Data β€’ Offline")

        # Overall one-line health summary.
        if phi3_info["server_healthy"] and groq_info['healthy_providers'] > 0:
            st.caption("πŸ’‘ All systems operational - research ready!")
        else:
            st.caption("⚠️ Some services limited - research may be affected")

    except Exception:
        st.error("❌ Status check failed")
        st.caption("Research platform may have issues")
307
+ st.caption("Research platform may have issues")
308
+
309
def render_default_sidebar():
    """Fallback sidebar shown when research stats cannot be loaded."""
    st.info("🌟 Start generating content to contribute to our research!")
    st.caption("Your feedback on Groq content will train Phi-3 to become a better educational AI")
    refresh_clicked = st.button("πŸ”„ Refresh Progress", use_container_width=True, key="refresh_progress")
    if refresh_clicked:
        st.rerun()