Spaces:
Running
Running
import html
import random
import time

import gradio as gr
# Dummy model catalogue backing the gallery. Each entry carries:
#   repo  - Hugging Face model id
#   image - preview image URL shown in the gallery
#   title - human-readable label
loras = [
    dict(
        repo="stabilityai/stable-diffusion-xl-base-1.0",
        image="https://huggingface.co/spaces/reach-vb/Blazingly-fast-LoRA/resolve/main/flux_lora.png",
        title="SDXL Base 1.0",
    ),
    dict(
        repo="stabilityai/sdxl-turbo",
        image="https://huggingface.co/stabilityai/sdxl-turbo/resolve/main/banner.png",
        title="SDXL Turbo",
    ),
    dict(
        repo="runwayml/stable-diffusion-v1-5",
        image="https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/sd-v1-5.png",
        title="SD 1.5",
    ),
    dict(
        repo="SG161222/Realistic_Vision_V5.1_noVAE",
        image="https://huggingface.co/SG161222/Realistic_Vision_V5.1_noVAE/resolve/main/realistic_vision_v5.1.png",
        title="Realistic Vision V5.1",
    ),
    dict(
        repo="gsdf/Counterfeit-V3.0",
        image="https://huggingface.co/gsdf/Counterfeit-V3.0/resolve/main/cf3.jpg",
        title="Counterfeit V3.0",
    ),
    dict(
        repo="digiplay/AbsoluteReality_v1.8.1",
        image="https://huggingface.co/digiplay/AbsoluteReality_v1.8.1/resolve/main/ar.jpg",
        title="Absolute Reality v1.8.1",
    ),
]
# Custom CSS for the Blocks layout. Each selector targets an elem_id
# assigned to a component in the UI below (title, gallery, generate
# button/column, model-list link, progress markdown).
css = """
#title { text-align: center; margin-bottom: 10px; }
#gallery {min-height: 450px; max-height: 650px; overflow-y: auto;}
#gen_column {display: flex; align-items: flex-end; margin-bottom: 0.5rem;}
#gen_btn {margin-bottom: 0.5rem; max-width: 100%;}
#lora_list {font-size: 0.8em; margin-top: 0.5rem;}
#progress {text-align: center; margin-top: 0.8rem;}
"""
# Font name passed to gr.themes.Soft(font=...) when building the theme.
font = "Montserrat"
# Dummy function to update selection
def update_selection(evt: gr.SelectData):
    """Update the UI when a model is selected from the gallery.

    Args:
        evt: Gradio selection event; ``evt.index`` is the position of the
            clicked gallery item.

    Returns:
        Tuple of (prompt placeholder update, info markdown text,
        selected index, default width, default height).
    """
    selected_index = evt.index
    # BUGFIX: after add_custom_lora() the gallery shows one more item than
    # the module-level `loras` list contains, so the last gallery index is
    # out of range for `loras` and previously raised IndexError here.
    # Mirror the bounds guard already used in run_lora().
    if selected_index is None or not (0 <= selected_index < len(loras)):
        model_title = "Custom Model"
        info_text = f"Selected model: **{model_title}**"
    else:
        selected_lora = loras[selected_index]
        # Fall back to the last path segment of the repo id when no title is set.
        model_title = selected_lora.get("title", selected_lora["repo"].split('/')[-1])
        # Create an informative text about the selected model
        info_text = (
            f"Selected model: **{model_title}**\n\n"
            f"Model ID: `{selected_lora['repo']}`"
        )
    # Return with appropriate placeholder text for the prompt
    return (
        gr.update(placeholder=f"Enter your prompt for {model_title}..."),
        info_text,
        selected_index,
        1024,  # Default width
        1024,  # Default height
    )
# Dummy function to add a custom model
def add_custom_lora(lora_id):
    """Add a custom model from a Hugging Face ID or URL.

    Args:
        lora_id: Hugging Face model id (e.g. ``user/model-name``) or URL.

    Returns:
        Tuple of updates for: info HTML card, remove button, gallery,
        selection text, selected index state, and prompt textbox.
    """
    # No-op when the textbox is empty or whitespace only.
    if not lora_id or not lora_id.strip():
        return gr.update(), gr.update(visible=False), gr.update(), gr.update(), None, gr.update()
    # BUGFIX: normalize the id once so stray whitespace does not leak into
    # the stored entry, labels, or placeholder text (emptiness was checked
    # on the stripped value but the raw value was used afterwards).
    lora_id = lora_id.strip()
    short_name = lora_id.split('/')[-1]
    # Create a dummy entry for the custom model
    custom_entry = {
        "repo": lora_id,
        "image": "https://huggingface.co/spaces/reach-vb/Blazingly-fast-LoRA/resolve/main/flux_lora.png",  # Placeholder image
        "title": f"Custom: {short_name}",
    }
    # Work on a copy so the module-level `loras` list stays untouched.
    updated_loras = loras.copy()
    updated_loras.append(custom_entry)
    # SECURITY: the id is user-supplied and interpolated into raw HTML —
    # escape it to avoid markup/script injection into the info card.
    safe_id = html.escape(lora_id)
    info_html = f"""
    <div style="padding: 10px; border: 1px solid #ddd; border-radius: 5px; margin-top: 10px;">
        <p><b>Custom model added:</b> {safe_id}</p>
    </div>
    """
    # Create info text for selection
    info_text = f"Using custom model: **{lora_id}**"
    # Return with updates
    return (
        gr.update(value=info_html, visible=True),
        gr.update(visible=True),
        gr.update(value=[(item.get("image"), item.get("title", item["repo"].split('/')[-1])) for item in updated_loras]),
        info_text,
        len(updated_loras) - 1,  # Index of the newly added model
        gr.update(placeholder=f"Enter your prompt for custom model {short_name}..."),
    )
# Dummy function to remove custom model info
def remove_custom_lora():
    """Reset the UI after the custom-model info card is dismissed.

    Returns:
        Tuple of updates that hide the info card and remove button, restore
        the original gallery, reset the selection text and index, and clear
        the custom-model textbox.
    """
    # Rebuild the gallery from the base catalogue only (no custom entry).
    gallery_items = []
    for entry in loras:
        label = entry.get("title", entry["repo"].split('/')[-1])
        gallery_items.append((entry.get("image"), label))
    return (
        gr.update(visible=False),
        gr.update(visible=False),
        gr.update(value=gallery_items),
        "Select a base model or add a custom one below.",
        None,
        gr.update(value=""),
    )
# Dummy function to generate images
def run_lora(prompt, selected_index, seed, width, height):
    """Simulate image generation with the selected model.

    Args:
        prompt: Text prompt entered by the user.
        selected_index: Gallery index of the chosen model, or None.
        seed: Requested seed; 0 means "pick a random one".
        width: Target image width (unused by this dummy implementation).
        height: Target image height (unused by this dummy implementation).

    Returns:
        Tuple of (result image, seed actually used, progress-bar update).
    """
    # Validate inputs first; errors are surfaced via the progress Markdown.
    if selected_index is None:
        return gr.update(value=None), seed, gr.update(value="Please select a model first.", visible=True)
    if not prompt or prompt.strip() == "":
        return gr.update(value=None), seed, gr.update(value="Please enter a prompt.", visible=True)
    # BUGFIX: removed a dead `progress = gr.update("Generating...")`
    # assignment — this is a plain function (not a generator), so that
    # intermediate update was never delivered to the UI and was immediately
    # overwritten before returning.
    # Simulate generation delay
    time.sleep(2)
    # A seed of 0 means "randomize": pick a positive 31-bit value.
    if seed == 0:
        seed = random.randint(1, 2147483647)
    # Indices past the end of `loras` come from custom models appended to
    # the gallery by add_custom_lora(); use a generic placeholder for them.
    if selected_index < len(loras):
        model_info = loras[selected_index]
    else:
        model_info = {"repo": "custom_model", "title": "Custom Model"}
    # For dummy purposes, we just return the model's preview image (or a
    # placeholder). A real implementation would call a generation API here.
    result_image = model_info.get("image", "https://huggingface.co/spaces/reach-vb/Blazingly-fast-LoRA/resolve/main/flux_lora.png")
    # Hide the progress bar now that "generation" is complete.
    return result_image, seed, gr.update(visible=False)
# Now you can run the app with this code:
if __name__ == "__main__":
    with gr.Blocks(theme=gr.themes.Soft(font=font), css=css) as app:
        # Page heading (links to the inference-providers docs).
        title = gr.HTML(
            """<h1><a href="https://huggingface.co/docs/inference-providers/en/index">Blazingly Fast LoRA by Fal & HF (this is a dummy app)</a> 🤗</h1>""",
            elem_id="title",
        )
        # BUGFIX: this component previously rebound `title`, shadowing the
        # heading above; renamed to `subtitle`.
        subtitle = gr.HTML(
            """<h3>This is just a dummy app</h3>""",
            elem_id="subtitle",
        )
        # --- States for parameters previously in Advanced Settings ---
        selected_index = gr.State(None)
        width = gr.State(1024)   # Default width
        height = gr.State(1024)  # Default height
        seed = gr.State(0)       # Default seed (will be randomized by run_lora)
        with gr.Row():
            with gr.Column(scale=3):
                prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Type a prompt after selecting a LoRA/Model")
            with gr.Column(scale=1, elem_id="gen_column"):
                generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
        with gr.Row():
            with gr.Column():
                selected_info = gr.Markdown("Select a base model or add a custom one below.")
                gallery = gr.Gallery(
                    # Ensure items have 'image' and 'title' keys, provide fallbacks if needed
                    [(item.get("image"), item.get("title", item["repo"].split('/')[-1])) for item in loras],
                    label="Model Gallery",
                    allow_preview=False,
                    columns=3,
                    elem_id="gallery",
                    show_share_button=False,
                )
                with gr.Group():
                    custom_lora = gr.Textbox(label="Custom Model", info="Hugging Face model ID (e.g., user/model-name) or URL", placeholder="stabilityai/stable-diffusion-xl-base-1.0")
                    gr.Markdown("[Check Hugging Face Models](https://huggingface.co/models?inference_provider=fal-ai&pipeline_tag=text-to-image&sort=trending)", elem_id="lora_list")
                custom_lora_info = gr.HTML(visible=False)
                custom_lora_button = gr.Button("Clear custom model info", visible=False)
            with gr.Column():
                # Progress element: shown briefly during generation, then
                # hidden by the update run_lora returns.
                progress_bar = gr.Markdown(elem_id="progress", visible=False, value="Generating...")
                result = gr.Image(label="Generated Image")
                # Display the seed used for the generation
                used_seed_display = gr.Textbox(label="Seed Used", value=0, interactive=False)
        # Gallery click -> update prompt placeholder, info text, and states.
        gallery.select(
            update_selection,
            inputs=[],  # No direct inputs needed, uses evt
            outputs=[prompt, selected_info, selected_index, width, height],
            api_name=False,
        )
        # Enter in the custom-model textbox -> register the custom model.
        custom_lora.submit(
            add_custom_lora,
            inputs=[custom_lora],
            # Outputs: info card, remove button, gallery, selection text, selected index state, prompt placeholder
            outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, prompt],
            api_name=False,
        )
        # Clear button -> reset the custom-model UI (clears the textbox too).
        custom_lora_button.click(
            remove_custom_lora,
            outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, custom_lora],
            api_name=False,
        )
        # Generate button or Enter in the prompt -> run the (dummy) generation.
        gr.on(
            triggers=[generate_button.click, prompt.submit],
            fn=run_lora,
            # Inputs use state variables for width, height, seed
            inputs=[prompt, selected_index, seed, width, height],
            # Outputs: result image, seed state (updated with used seed), progress bar update
            outputs=[result, seed, progress_bar],
            api_name=False,
        ).then(
            # Update the displayed seed value after run_lora completes
            lambda s: gr.update(value=s),
            inputs=[seed],
            outputs=[used_seed_display],
            api_name=False,
        )
    app.queue()
    app.launch(mcp_server=True)