Spaces: WIP Fix 1
app.py CHANGED
@@ -12,30 +12,33 @@ DEFAULT_MODELS = {
 # ---------- Model Descriptions ----------
 MODEL_INFO = {
+    "mistralai/Mixtral-8x7B-Instruct-v0.1": "Balanced generalist; strong in structured reasoning and storytelling.",
+    "codellama/CodeLlama-7b-Instruct-hf": "Excellent code generator; best for pseudocode expansion and logic clarity.",
+    "tiiuae/falcon-7b-instruct": "Fast and lightweight; good for simple creative text, but less technical precision.",
+    "google/gemma-2-9b-it": "Analytical critic that provides detailed, structured feedback.",
+    "meta-llama/Meta-Llama-3-8B-Instruct": "Balanced critic; creative and nuanced, slightly more lenient.",
+    "phind/Phind-CodeLlama-34B-v2": "Expert coder model; verbose but deeply logical and precise.",
+    "stabilityai/stablelm-2-12b": "Fluent natural-language generator; great for fiction and tone consistency."
 }

+# ---------- Helpers ----------
 def load_pipeline(model_name):
     return pipeline("text-generation", model=model_name)

 # ---------- Core Logic ----------
 def refine_prompt(idea, model_name):
     model = load_pipeline(model_name)
+    refined = model(
+        f"Refine this creative idea into a concise, high-quality prompt: {idea}",
+        max_new_tokens=200
+    )[0]["generated_text"]
     return refined.strip()

 def generate_code(prompt, model_name):
     model = load_pipeline(model_name)
     pseudo = model(f"Create simple pseudocode for: {prompt}", max_new_tokens=200)[0]["generated_text"]
     simple = model(f"Expand this pseudocode into a simple code snippet:\n{pseudo}", max_new_tokens=300)[0]["generated_text"]
+    full = model(f"Turn this snippet into a complete, functional program:\n{simple}", max_new_tokens=700)[0]["generated_text"]
     return pseudo.strip(), simple.strip(), full.strip()

 def generate_book(prompt, model_name):
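A note on load_pipeline above: it builds a fresh pipeline, and therefore reloads model weights, on every call, and the workflow below invokes it several times per click. A minimal caching sketch, not part of the commit, assuming the pipeline constructor is the only loading path:

    from functools import lru_cache
    from transformers import pipeline

    @lru_cache(maxsize=4)  # keep a few recently used pipelines resident
    def load_pipeline(model_name):
        # Same behaviour as the version above, but repeated calls with the
        # same model_name reuse the already-loaded pipeline instead of reloading.
        return pipeline("text-generation", model=model_name)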
@@ -43,45 +46,44 @@ def generate_book(prompt, model_name):
     structure = ["Start", "Development", "Climax", "Conclusion", "End"]
     parts = []
     for section in structure:
+        part = model(f"Write the {section} section of a short book based on this idea: {prompt}", max_new_tokens=400)[0]["generated_text"]
         parts.append(f"### {section}\n{part.strip()}\n")
     return "\n".join(parts)

+def refine_output(output_text, model_name):
+    model = load_pipeline(model_name)
+    refined = model(f"Improve this text/code while preserving meaning and coherence:\n{output_text}", max_new_tokens=700)[0]["generated_text"]
+    return refined.strip()
+
 def get_critic_feedback(output_text, model1_name, model2_name):
     critic1 = load_pipeline(model1_name)
     critic2 = load_pipeline(model2_name)
+    critique_1 = critic1(f"Rate this text from 0 to 100 and justify the score briefly:\n{output_text}", max_new_tokens=200)[0]["generated_text"]
+    critique_2 = critic2(f"Rate this text from 0 to 100 and justify the score briefly:\n{output_text}", max_new_tokens=200)[0]["generated_text"]
     return critique_1.strip(), critique_2.strip()

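A caveat that applies to every model(...)[0]["generated_text"] call in this file: by default the transformers text-generation pipeline returns the prompt and the completion together, so the refined text and the critiques still begin with the instruction that produced them. A sketch of refine_prompt using the pipeline's standard return_full_text flag to drop the echo (an option this commit does not use):

    from transformers import pipeline

    def refine_prompt(idea, model_name):
        model = pipeline("text-generation", model=model_name)
        refined = model(
            f"Refine this creative idea into a concise, high-quality prompt: {idea}",
            max_new_tokens=200,
            return_full_text=False,  # keep only the newly generated tokens
        )[0]["generated_text"]
        return refined.strip()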
+# ---------- Workflow ----------
 def workflow(idea, mode, prompt_model, code_model, book_model, critic1_model, critic2_model):
     refined_prompt = refine_prompt(idea, prompt_model)

     if mode == "Code mode":
         pseudo, simple, full = generate_code(refined_prompt, code_model)
+        generated_output = f"## Refined Prompt\n{refined_prompt}\n\n### Pseudocode\n{pseudo}\n\n### Simple Code\n{simple}\n\n### Final Code\n{full}"
+        refined_final = refine_output(full, prompt_model)
     else:
         book_text = generate_book(refined_prompt, book_model)
+        generated_output = f"## Refined Prompt\n{refined_prompt}\n\n{book_text}"
+        refined_final = refine_output(book_text, prompt_model)

+    # Critics now evaluate the REFINED version
+    feedback1, feedback2 = get_critic_feedback(refined_final, critic1_model, critic2_model)

+    return refined_prompt, generated_output, refined_final, feedback1, feedback2

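The two critiques come back as free text and are shown verbatim in the UI. If the 0 to 100 rating were ever needed as a number, a small parsing sketch along these lines could pull it out; extract_score is a hypothetical helper, and it assumes the critic actually states a number, which is not guaranteed:

    import re

    def extract_score(critique: str):
        # Return the first standalone integer in 0..100 found in the
        # critique, or None if the critic produced no usable number.
        match = re.search(r"\b(100|[1-9]?[0-9])\b", critique)
        return int(match.group(1)) if match else None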
 # ---------- UI ----------
 with gr.Blocks() as demo:
+    gr.Markdown("# 🤖 AI Workflow: Code or Book Creator with Post-Refinement Critique")

     with gr.Tab("Main"):
         idea_input = gr.Textbox(label="Enter your idea", placeholder="Type an idea...")
@@ -89,13 +91,13 @@ with gr.Blocks() as demo:
         submit = gr.Button("Generate")

         refined_prompt_box = gr.Textbox(label="Refined Prompt")
+        raw_output_box = gr.Markdown(label="Generated Output (Before Refinement)")
+        refined_output_box = gr.Markdown(label="Refined Final Version (Self-Improved)")
         critic1_box = gr.Textbox(label="Critic 1 Feedback")
         critic2_box = gr.Textbox(label="Critic 2 Feedback")

     with gr.Accordion("🧩 Advanced Options", open=False):
+        gr.Markdown("### Choose models and learn their characteristics:")

         def model_choices():
             return list(MODEL_INFO.keys())
@@ -106,17 +108,21 @@ with gr.Blocks() as demo:
         critic1_model = gr.Dropdown(model_choices(), label="Critic 1", value=DEFAULT_MODELS["critic_1"])
         critic2_model = gr.Dropdown(model_choices(), label="Critic 2", value=DEFAULT_MODELS["critic_2"])

+        model_info_box = gr.Markdown("Hover or change selection to view model info.")

         def show_model_info(model_name):
             return f"**{model_name}** → {MODEL_INFO.get(model_name, 'No info available.')}"

         prompt_model.change(show_model_info, inputs=prompt_model, outputs=model_info_box)
+        code_model.change(show_model_info, inputs=code_model, outputs=model_info_box)
+        book_model.change(show_model_info, inputs=book_model, outputs=model_info_box)
+        critic1_model.change(show_model_info, inputs=critic1_model, outputs=model_info_box)
+        critic2_model.change(show_model_info, inputs=critic2_model, outputs=model_info_box)

     submit.click(
         fn=workflow,
         inputs=[idea_input, mode_select, prompt_model, code_model, book_model, critic1_model, critic2_model],
+        outputs=[refined_prompt_box, raw_output_box, refined_output_box, critic1_box, critic2_box],
     )

 demo.launch()
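One closing note: a single click runs multiple model loads and long generations, which can outlast a default HTTP request. If timeouts appeared on the Space, enabling Gradio's request queue would be the usual remedy; a minimal sketch of the last line, assuming a recent Gradio version:

    # Queue requests so long generations don't hit request timeouts;
    # queue() returns the Blocks object, so launch() can be chained.
    demo.queue().launch()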