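"""Gradio Space: turn uploaded photos into line drawings using two ResNet-style
generators ("Complex Lines" / "Simple Lines"), tuned for CPU-only execution."""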
import numpy as np
import torch
import torch.nn as nn
import gradio as gr
from PIL import Image
import torchvision.transforms as transforms
import os
import json
from datetime import datetime
# Force CPU mode for the Zero GPU environment
device = torch.device('cpu')
torch.set_num_threads(4)  # Optimize CPU performance

# Make cuDNN deterministic (a no-op on CPU, harmless if CUDA is present)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True

# Cap stored history entries to reduce memory and disk usage
MAX_HISTORY_ENTRIES = 5
# Style presets
STYLE_PRESETS = {
    "Sketch": {"line_thickness": 1.0, "contrast": 1.2, "brightness": 1.0},
    "Bold": {"line_thickness": 1.5, "contrast": 1.4, "brightness": 0.8},
    "Light": {"line_thickness": 0.8, "contrast": 0.9, "brightness": 1.2},
    "High Contrast": {"line_thickness": 1.2, "contrast": 1.6, "brightness": 0.7},
}
# History management
class HistoryManager:
    def __init__(self, max_entries=10):
        self.max_entries = max_entries
        self.history_file = "processing_history.json"
        self.history = self.load_history()

    def load_history(self):
        try:
            if os.path.exists(self.history_file):
                with open(self.history_file, 'r') as f:
                    return json.load(f)
            return []
        except Exception:
            return []

    def save_history(self):
        try:
            with open(self.history_file, 'w') as f:
                json.dump(self.history[-self.max_entries:], f)
        except Exception as e:
            print(f"Error saving history: {e}")

    def add_entry(self, input_path, settings):
        entry = {
            "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "input_file": os.path.basename(input_path),
            "settings": settings
        }
        self.history.append(entry)
        if len(self.history) > self.max_entries:
            self.history.pop(0)
        self.save_history()

    def get_latest_settings(self):
        if self.history:
            return self.history[-1]["settings"]
        return None

# Initialize history manager with the reduced entry cap
history_manager = HistoryManager(max_entries=MAX_HISTORY_ENTRIES)
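# Note: Spaces containers have an ephemeral filesystem unless persistent storage
# is attached, so processing_history.json resets on every restart.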
norm_layer = nn.InstanceNorm2d

class ResidualBlock(nn.Module):
    def __init__(self, in_features):
        super(ResidualBlock, self).__init__()
        conv_block = [nn.ReflectionPad2d(1),
                      nn.Conv2d(in_features, in_features, 3),
                      norm_layer(in_features),
                      nn.ReLU(inplace=True),
                      nn.ReflectionPad2d(1),
                      nn.Conv2d(in_features, in_features, 3),
                      norm_layer(in_features)]
        self.conv_block = nn.Sequential(*conv_block)

    def forward(self, x):
        return x + self.conv_block(x)
class Generator(nn.Module):
    def __init__(self, input_nc, output_nc, n_residual_blocks=9, sigmoid=True):
        super(Generator, self).__init__()

        # Initial convolution block
        model0 = [nn.ReflectionPad2d(3),
                  nn.Conv2d(input_nc, 64, 7),
                  norm_layer(64),
                  nn.ReLU(inplace=True)]
        self.model0 = nn.Sequential(*model0)

        # Downsampling
        model1 = []
        in_features = 64
        out_features = in_features * 2
        for _ in range(2):
            model1 += [nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
                       norm_layer(out_features),
                       nn.ReLU(inplace=True)]
            in_features = out_features
            out_features = in_features * 2
        self.model1 = nn.Sequential(*model1)

        # Residual blocks
        model2 = []
        for _ in range(n_residual_blocks):
            model2 += [ResidualBlock(in_features)]
        self.model2 = nn.Sequential(*model2)

        # Upsampling
        model3 = []
        out_features = in_features // 2
        for _ in range(2):
            model3 += [nn.ConvTranspose2d(in_features, out_features, 3, stride=2, padding=1, output_padding=1),
                       norm_layer(out_features),
                       nn.ReLU(inplace=True)]
            in_features = out_features
            out_features = in_features // 2
        self.model3 = nn.Sequential(*model3)

        # Output layer
        model4 = [nn.ReflectionPad2d(3),
                  nn.Conv2d(64, output_nc, 7)]
        if sigmoid:
            model4 += [nn.Sigmoid()]
        self.model4 = nn.Sequential(*model4)

    def forward(self, x):
        out = self.model0(x)
        out = self.model1(out)
        out = self.model2(out)
        out = self.model3(out)
        out = self.model4(out)
        return out
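# Shape contract (informal): with the (input_nc=3, output_nc=1) configuration
# used below, the generator maps an (N, 3, H, W) RGB batch to an (N, 1, H, W)
# line map in [0, 1] (sigmoid output). Sides that are not multiples of 4 can
# come back a pixel or two off after the two stride-2 down/upsampling stages.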
# Initialize models
def load_models():
    try:
        print("Initializing models in CPU mode...")
        model1 = Generator(3, 1, 3)
        model2 = Generator(3, 1, 3)
        model1.load_state_dict(torch.load('model.pth', map_location='cpu'))
        model2.load_state_dict(torch.load('model2.pth', map_location='cpu'))
        model1.eval()
        model2.eval()
        torch.set_grad_enabled(False)
        print("Models loaded successfully in CPU mode")
        return model1, model2
    except Exception as e:
        print(f"Error loading models: {e}")
        raise gr.Error("Failed to initialize models. Please check the model files and system configuration.")

# Load models
try:
    print("Starting model initialization...")
    model1, model2 = load_models()
    print("Model initialization completed")
except Exception as e:
    print(f"Critical error during model initialization: {e}")
    raise gr.Error("Failed to start the application due to model initialization error.")
def apply_preset(preset_name):
    """Apply a style preset and return its settings."""
    if preset_name in STYLE_PRESETS:
        return (
            STYLE_PRESETS[preset_name]["line_thickness"],
            STYLE_PRESETS[preset_name]["contrast"],
            STYLE_PRESETS[preset_name]["brightness"],
            True  # Enable enhancement for presets
        )
    return (1.0, 1.0, 1.0, False)
def enhance_lines(img, contrast=1.0, brightness=1.0):
    """Enhance a line drawing with contrast and brightness adjustments."""
    # Work in [0, 1] floats; the incoming PIL image is 8-bit (0-255), so it must
    # be rescaled first or the clip below would saturate everything to white.
    enhanced = np.asarray(img).astype(np.float32) / 255.0
    enhanced = enhanced * contrast
    # brightness is a control centered on 1.0 (neutral), so shift by its offset
    enhanced = np.clip(enhanced + (brightness - 1.0), 0.0, 1.0)
    return Image.fromarray((enhanced * 255).astype(np.uint8))
def predict(input_img, version, preset_name, line_thickness=1.0, contrast=1.0,
            brightness=1.0, enable_enhancement=False, output_size="Original"):
    try:
        # Apply preset if selected
        if preset_name != "Custom":
            line_thickness, contrast, brightness, enable_enhancement = apply_preset(preset_name)

        # Open the input image; convert to RGB so RGBA/grayscale uploads work
        original_img = Image.open(input_img).convert('RGB')
        original_size = original_img.size

        # Adjust output size
        if output_size != "Original":
            width, height = map(int, output_size.split("x"))
            target_size = (width, height)
        else:
            target_size = original_size

        # Transform pipeline
        transform = transforms.Compose([
            transforms.Resize(256, interpolation=transforms.InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        input_tensor = transform(original_img).unsqueeze(0).to(device)

        # Process through the selected model
        with torch.no_grad():
            if version == 'Simple Lines':
                output = model2(input_tensor)
            else:
                output = model1(input_tensor)

        # Scale output intensity (the "line thickness" control acts as a multiplier)
        output = output * line_thickness

        # Convert to image
        output_img = transforms.ToPILImage()(output.squeeze().cpu().clamp(0, 1))

        # Apply enhancements if enabled
        if enable_enhancement:
            output_img = enhance_lines(output_img, contrast, brightness)

        # Resize to target size
        output_img = output_img.resize(target_size, Image.BICUBIC)

        # Save to history
        settings = {
            "version": version,
            "preset": preset_name,
            "line_thickness": line_thickness,
            "contrast": contrast,
            "brightness": brightness,
            "enable_enhancement": enable_enhancement,
            "output_size": output_size
        }
        history_manager.add_entry(input_img, settings)

        return output_img
    except Exception as e:
        raise gr.Error(f"Error processing image: {e}")
# Custom CSS
custom_css = """
.gradio-container {
    font-family: 'Helvetica Neue', Arial, sans-serif;
    max-width: 1200px !important;
    margin: auto;
}
.gr-button {
    border-radius: 8px;
    background: linear-gradient(45deg, #3498db, #2980b9);
    border: none;
    color: white;
    transition: all 0.3s ease;
}
.gr-button:hover {
    background: linear-gradient(45deg, #2980b9, #3498db);
    transform: translateY(-2px);
    box-shadow: 0 4px 12px rgba(0,0,0,0.15);
}
.gr-button.secondary {
    background: linear-gradient(45deg, #95a5a6, #7f8c8d);
}
.gr-input {
    border-radius: 8px;
    border: 2px solid #3498db;
    transition: all 0.3s ease;
}
.gr-input:focus {
    border-color: #2980b9;
    box-shadow: 0 0 0 2px rgba(41,128,185,0.2);
}
.gr-form {
    border-radius: 12px;
    box-shadow: 0 4px 12px rgba(0,0,0,0.1);
    padding: 20px;
}
.gr-header {
    text-align: center;
    margin-bottom: 2em;
}
"""
# Create Gradio interface
with gr.Blocks(css=custom_css) as iface:
    with gr.Row(elem_classes="gr-header"):
        gr.Markdown("# 🎨 Advanced Line Drawing Generator")
        gr.Markdown("Transform your images into beautiful line drawings with advanced controls")

    with gr.Row():
        with gr.Column(scale=1):
            input_image = gr.Image(type="filepath", label="Upload Image")
            with gr.Row():
                version = gr.Radio(
                    choices=['Complex Lines', 'Simple Lines'],
                    value='Simple Lines',
                    label="Drawing Style"
                )
                preset_selector = gr.Dropdown(
                    choices=["Custom"] + list(STYLE_PRESETS.keys()),
                    value="Custom",
                    label="Style Preset"
                )
            with gr.Accordion("Advanced Settings", open=False):
                output_size = gr.Dropdown(
                    choices=["Original", "512x512", "1024x1024", "2048x2048"],
                    value="Original",
                    label="Output Size"
                )
                line_thickness = gr.Slider(
                    minimum=0.1,
                    maximum=2.0,
                    value=1.0,
                    step=0.1,
                    label="Line Thickness"
                )
                enable_enhancement = gr.Checkbox(
                    label="Enable Enhancement",
                    value=False
                )
                with gr.Group(visible=False) as enhancement_controls:
                    contrast = gr.Slider(
                        minimum=0.5,
                        maximum=2.0,
                        value=1.0,
                        step=0.1,
                        label="Contrast"
                    )
                    brightness = gr.Slider(
                        minimum=0.5,
                        maximum=1.5,
                        value=1.0,
                        step=0.1,
                        label="Brightness"
                    )
        with gr.Column(scale=1):
            output_image = gr.Image(type="pil", label="Generated Line Drawing")
            with gr.Row():
                generate_btn = gr.Button("Generate", variant="primary", size="lg")
                clear_btn = gr.Button("Clear", variant="secondary", size="lg")
    # Event handlers
    enable_enhancement.change(
        fn=lambda x: gr.update(visible=x),
        inputs=[enable_enhancement],
        outputs=[enhancement_controls]
    )

    preset_selector.change(
        fn=apply_preset,
        inputs=[preset_selector],
        outputs=[line_thickness, contrast, brightness, enable_enhancement]
    )

    generate_btn.click(
        fn=predict,
        inputs=[
            input_image,
            version,
            preset_selector,
            line_thickness,
            contrast,
            brightness,
            enable_enhancement,
            output_size
        ],
        outputs=output_image
    )

    clear_btn.click(
        fn=lambda: (None, "Simple Lines", "Custom", 1.0, 1.0, 1.0, False, "Original"),
        inputs=[],
        outputs=[
            input_image,
            version,
            preset_selector,
            line_thickness,
            contrast,
            brightness,
            enable_enhancement,
            output_size
        ]
    )
# Launch the interface with optimized settings
iface.launch(
    server_name="0.0.0.0",
    server_port=7860,
    share=False,
    debug=False,
    show_error=True,
    max_threads=4,
    ssr_mode=False,  # Disable server-side rendering to avoid Node.js issues
)
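# Assumed dependencies (requirements.txt): torch, torchvision, numpy, Pillow,
# and gradio>=5 (ssr_mode requires Gradio 5). The weight files model.pth and
# model2.pth are expected alongside this script.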