PatoFlamejanteTV committed on
Commit
7a034cb
·
verified ·
1 Parent(s): 8bb28df

fixing stuff...

Browse files
Files changed (1) hide show
  1. app.py +115 -41
app.py CHANGED
@@ -81,48 +81,122 @@ def workflow(idea, mode, prompt_model, code_model, book_model, critic1_model, cr
81
  return refined_prompt, generated_output, refined_final, feedback1, feedback2
82
 
83
 
84
- # ---------- UI ----------
85
- with gr.Blocks() as demo:
86
- gr.Markdown("# 🤖 AI Workflow: Code or Book Creator with Post-Refinement Critique")
87
-
88
- with gr.Tab("Main"):
89
- idea_input = gr.Textbox(label="Enter your idea", placeholder="Type an idea...")
90
- mode_select = gr.Radio(["Code mode", "Book mode"], label="Mode")
91
- submit = gr.Button("Generate")
92
-
93
- refined_prompt_box = gr.Textbox(label="Refined Prompt")
94
- raw_output_box = gr.Markdown(label="Generated Output (Before Refinement)")
95
- refined_output_box = gr.Markdown(label="Refined Final Version (Self-Improved)")
96
- critic1_box = gr.Textbox(label="Critic 1 Feedback")
97
- critic2_box = gr.Textbox(label="Critic 2 Feedback")
98
-
99
- with gr.Accordion("🧩 Advanced Options", open=False):
100
- gr.Markdown("### Choose models and learn their characteristics:")
101
-
102
- def model_choices():
103
- return list(MODEL_INFO.keys())
104
-
105
- prompt_model = gr.Dropdown(model_choices(), label="Prompt Refiner", value=DEFAULT_MODELS["prompt_refiner"])
106
- code_model = gr.Dropdown(model_choices(), label="Code Generator", value=DEFAULT_MODELS["code_model"])
107
- book_model = gr.Dropdown(model_choices(), label="Book Generator", value=DEFAULT_MODELS["book_model"])
108
- critic1_model = gr.Dropdown(model_choices(), label="Critic 1", value=DEFAULT_MODELS["critic_1"])
109
- critic2_model = gr.Dropdown(model_choices(), label="Critic 2", value=DEFAULT_MODELS["critic_2"])
110
-
111
- model_info_box = gr.Markdown("Hover or change selection to view model info.")
112
-
113
- def show_model_info(model_name):
114
- return f"**{model_name}** → {MODEL_INFO.get(model_name, 'No info available.')}"
115
-
116
- prompt_model.change(show_model_info, inputs=prompt_model, outputs=model_info_box)
117
- code_model.change(show_model_info, inputs=code_model, outputs=model_info_box)
118
- book_model.change(show_model_info, inputs=book_model, outputs=model_info_box)
119
- critic1_model.change(show_model_info, inputs=critic1_model, outputs=model_info_box)
120
- critic2_model.change(show_model_info, inputs=critic2_model, outputs=model_info_box)
121
-
122
- submit.click(
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
123
  fn=workflow,
124
- inputs=[idea_input, mode_select, prompt_model, code_model, book_model, critic1_model, critic2_model],
125
- outputs=[refined_prompt_box, raw_output_box, refined_output_box, critic1_box, critic2_box],
126
  )
127
 
128
  demo.launch()
 
81
  return refined_prompt, generated_output, refined_final, feedback1, feedback2
82
 
83
 
84
+ import gradio as gr
85
+ from transformers import pipeline
86
+
87
# -------------------------------
# Model configuration dictionary
# -------------------------------
# Maps a Hugging Face model id to a short human-readable blurb shown in the UI.
MODEL_INFO = {
    "gpt2": "117M params: Classic small model for text generation, coherent short outputs.",
    "tiiuae/falcon-rw-1b": "1B params: Lightweight general model, good for creative text or simple logic.",
    "microsoft/phi-2": "2.7B params: Compact and strong for reasoning or code, moderate GPU load.",
    "Qwen/Qwen2.5-0.5B-Instruct": "0.5B params: Efficient instruction model, performs well for structured prompts.",
    "TinyLlama/TinyLlama-1.1B-Chat-v1.0": "1.1B params: Balanced, fast, and decent for storytelling and small code snippets.",
    "SmolLM2-135M": "135M params: Extremely light, suitable for quick text generation with limited coherence.",
}


def get_model_description(model_name):
    """Return the description text for the given model name.

    Falls back to a generic hint when the model is not in MODEL_INFO.
    """
    fallback = "Select a model to view details."
    return MODEL_INFO.get(model_name, fallback)
102
+
103
# -------------------------------
# Pipelines Setup
# -------------------------------
def load_pipeline(model_name):
    """Create and return a text-generation pipeline for *model_name*.

    device_map="auto" defers device placement to the library (GPU when
    available — presumably via accelerate; confirm in deployment env).
    """
    text_gen = pipeline(
        "text-generation",
        model=model_name,
        device_map="auto",
    )
    return text_gen
109
+
110
# Default base models for specific roles
REFINER_MODEL = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
CRITIC_MODEL_1 = "Qwen/Qwen2.5-0.5B-Instruct"
CRITIC_MODEL_2 = "tiiuae/falcon-rw-1b"

# Preload pipelines for speed: the refiner and both critics are needed on
# every workflow run, so load them once at import time (same order as before).
refiner_pipe, critic_pipe_1, critic_pipe_2 = (
    load_pipeline(name)
    for name in (REFINER_MODEL, CRITIC_MODEL_1, CRITIC_MODEL_2)
)
119
+
120
+ # -------------------------------
121
+ # Core Logic
122
+ # -------------------------------
123
def workflow(idea, mode, model_name):
    """Run the full idea -> refine -> generate -> critique -> re-refine pipeline.

    Args:
        idea: Free-form user idea text from the UI.
        mode: "Code mode" or "Book mode" (any other value falls back to book).
        model_name: Hugging Face model id used for the main generation step.

    Returns:
        Tuple of (refined_prompt, initial_output, avg_score, combined_feedback,
        final_output), matching the Gradio outputs wiring.
    """
    import re

    def _completion(pipe, prompt, **gen_kwargs):
        # HF text-generation pipelines echo the prompt at the start of
        # "generated_text". Strip it so downstream prompts and the critics
        # only see the model's actual continuation (previously the echoed
        # instructions cascaded into every later prompt).
        text = pipe(prompt, **gen_kwargs)[0]["generated_text"]
        if text.startswith(prompt):
            return text[len(prompt):].strip()
        return text.strip()

    def _extract_score(text):
        # Pull a 0-100 rating out of free-form critic text. Prefer an explicit
        # "NN/100"; otherwise fall back to the first number. (The old version
        # matched the first number in the prompt-echoing output, which was the
        # "0" in "from 0 to 100", so scores were effectively always 0.)
        match = re.search(r"(\d{1,3})\s*/\s*100", text) or re.search(r"(\d{1,3})", text)
        if match:
            return min(100, max(0, int(match.group(1))))
        return 50  # neutral default when the critic gave no number

    # Step 1: Refine the idea. do_sample=True so temperature actually applies
    # (without it the pipeline decodes greedily and ignores temperature).
    ref_prompt = f"Refine this idea into a clear, specific prompt for {mode}:\n\n{idea}"
    refined = _completion(
        refiner_pipe, ref_prompt, max_new_tokens=120, temperature=0.7, do_sample=True
    )

    # Step 2: Generate output with the user-selected model.
    gen_pipe = load_pipeline(model_name)
    if mode == "Code mode":
        code_prompt = f"Create complete working code for this idea:\n\n{refined}\nInclude comments and clear structure."
    else:
        code_prompt = f"Write a short book with sections: Start, Development, Climax, Conclusion, and End. The theme:\n\n{refined}"

    output = _completion(
        gen_pipe, code_prompt, max_new_tokens=500, temperature=0.8, do_sample=True
    )

    # Step 3: Two independent critics rate the same output.
    critique_prompt = (
        f"Rate the following {mode} output from 0 to 100, and explain weaknesses and improvements:\n\n{output}"
    )
    feedback_1 = _completion(critic_pipe_1, critique_prompt, max_new_tokens=200)
    feedback_2 = _completion(critic_pipe_2, critique_prompt, max_new_tokens=200)

    score1 = _extract_score(feedback_1)
    score2 = _extract_score(feedback_2)
    avg_score = (score1 + score2) / 2

    # Step 4: Refine the output based on both critiques.
    refine_final_prompt = f"Refine this output based on these two critics' feedbacks:\n\nCritic 1: {feedback_1}\n\nCritic 2: {feedback_2}\n\nOriginal Output:\n{output}"
    final_output = _completion(refiner_pipe, refine_final_prompt, max_new_tokens=400)

    combined_feedback = f"Critic 1 ({score1}/100): {feedback_1}\n\nCritic 2 ({score2}/100): {feedback_2}"

    return refined, output, avg_score, combined_feedback, final_output
164
+
165
+
166
+ # -------------------------------
167
+ # Gradio Interface
168
+ # -------------------------------
169
# -------------------------------
# Gradio Interface
# -------------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("## 🧩 AI Idea Refinement & Creation Workflow")

    # --- Inputs ---
    idea_box = gr.Textbox(
        label="💡 Your Idea",
        placeholder="Type your idea (e.g. 'A tool that teaches programming using natural language puzzles')"
    )
    mode_choice = gr.Radio(["Code mode", "Book mode"], label="Select Mode", value="Code mode")

    # Model selection lives behind an accordion so the default flow stays simple.
    with gr.Accordion("⚙️ Advanced Options", open=False):
        default_model = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
        model_picker = gr.Dropdown(
            choices=list(MODEL_INFO.keys()),
            value=default_model,
            label="Model for Main Generation"
        )
        model_blurb = gr.Markdown(get_model_description(default_model))
        # Refresh the description whenever the selection changes.
        model_picker.change(fn=get_model_description, inputs=model_picker, outputs=model_blurb)

    # --- Outputs, in the same order workflow() returns them ---
    refined_prompt_box = gr.Textbox(label="🧠 Refined Prompt", interactive=False)
    initial_output_box = gr.Textbox(label="🧾 Generated Output (Pre-Critics)", lines=10)
    score_box = gr.Number(label="📊 Average Score (0–100)", interactive=False)
    feedback_box = gr.Textbox(label="🧩 Critics’ Combined Feedback", lines=10)
    final_output_box = gr.Textbox(label="💎 Final Refined Output (Post-Critics)", lines=10)

    run_btn = gr.Button("🚀 Run Full Workflow")

    run_btn.click(
        fn=workflow,
        inputs=[idea_box, mode_choice, model_picker],
        outputs=[refined_prompt_box, initial_output_box, score_box, feedback_box, final_output_box]
    )

demo.launch()