Spaces: Running on Zero
Add MCP
app.py CHANGED
@@ -84,6 +84,17 @@ def _calculate_new_dimensions_wan(pil_image, mod_val, calculation_max_area,
     return new_h, new_w
 
 def handle_image_upload_for_dims_wan(uploaded_pil_image, current_h_val, current_w_val):
+    """
+    Handle image upload and calculate appropriate dimensions for video generation.
+
+    Args:
+        uploaded_pil_image: The uploaded image (PIL Image or numpy array)
+        current_h_val: Current height slider value
+        current_w_val: Current width slider value
+
+    Returns:
+        Tuple of gr.update objects for height and width sliders
+    """
     if uploaded_pil_image is None:
         return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
     try:
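
The docstring above documents the new behaviour, but the actual dimension math lives in _calculate_new_dimensions_wan, which is outside this diff. A minimal sketch of one plausible implementation, assuming the helper scales the upload toward a target pixel area and snaps both sides to a multiple of mod_val; the function name, defaults, and values below are illustrative, not taken from the Space:

    # Illustrative sketch only: _calculate_new_dimensions_wan itself is not shown in this diff.
    # Assumed behaviour: scale the upload toward a target pixel area while keeping the
    # aspect ratio, then snap both sides to a multiple of mod_val.
    import math

    def sketch_new_dimensions(img_w, img_h, mod_val=32, calculation_max_area=480 * 832):
        scale = math.sqrt(calculation_max_area / (img_w * img_h))
        new_w = max(mod_val, round(img_w * scale / mod_val) * mod_val)
        new_h = max(mod_val, round(img_h * scale / mod_val) * mod_val)
        return new_h, new_w

    # A 1920x1080 upload snaps to a mod-32-aligned resolution near the target area.
    print(sketch_new_dimensions(1920, 1080))   # -> (480, 832)

Rounding to a multiple of mod_val is the usual way to keep the resolution compatible with the model's stride requirements.
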
@@ -137,7 +148,24 @@ def generate_video(
     seed=42,
     progress=gr.Progress(track_tqdm=True)
 ):
-    """
+    """
+    Generate a video from text prompt and optional image using the Wan 2.2 TI2V model.
+
+    Args:
+        image: Optional input image (numpy array) for image-to-video generation
+        prompt: Text prompt describing the desired video
+        height: Target video height in pixels
+        width: Target video width in pixels
+        duration_seconds: Desired video duration in seconds
+        sampling_steps: Number of denoising steps for video generation
+        guide_scale: Guidance scale for classifier-free guidance
+        shift: Sample shift parameter for the model
+        seed: Random seed for reproducibility (-1 for random)
+        progress: Gradio progress tracker
+
+    Returns:
+        Path to the generated video file
+    """
     if seed == -1:
         seed = random.randint(0, sys.maxsize)
 
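
With the MCP change in mind, this docstring doubles as the public documentation of the endpoint. A hedged usage sketch for calling it from another process with gradio_client follows; the Space id and api_name are placeholders, and only the parameter names are taken from the docstring above (pass them positionally if your gradio_client version does not accept keyword arguments):

    # Hedged usage sketch: calling the generate_video endpoint with gradio_client.
    # "user/wan2-2-ti2v" and api_name are placeholders, not confirmed by this diff.
    from gradio_client import Client

    client = Client("user/wan2-2-ti2v")        # hypothetical Space id
    video_path = client.predict(
        image=None,                            # no input image -> plain text-to-video
        prompt="A red fox trotting through fresh snow at sunrise",
        height=480,
        width=832,
        duration_seconds=4,
        sampling_steps=30,
        guide_scale=5.0,
        shift=8.0,
        seed=-1,                               # -1 asks the app to pick a random seed
        api_name="/generate_video",            # assumed endpoint name
    )
    print(video_path)                          # path to the generated video file
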
@@ -250,4 +278,4 @@ with gr.Blocks(css=css, theme=gr.themes.Soft(), delete_cache=(60, 900)) as demo:
     )
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch(mcp_server=True)
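
This is the change the commit title refers to: with mcp_server=True, Gradio serves the app as an MCP (Model Context Protocol) server in addition to the web UI, and function docstrings such as the ones added above are used to generate the tool and parameter descriptions that MCP clients see. A minimal stand-alone sketch of the same flag, assuming a Gradio build with MCP support (for example installed via pip install "gradio[mcp]"); it is not the Space's real UI:

    # Minimal stand-alone sketch of launching a Gradio app with its MCP server enabled.
    import gradio as gr

    def echo(text: str) -> str:
        """Return the input text unchanged (exposed to MCP clients as a tool)."""
        return text

    with gr.Blocks() as demo:
        inp = gr.Textbox(label="Input")
        out = gr.Textbox(label="Output")
        inp.submit(echo, inputs=inp, outputs=out)

    if __name__ == "__main__":
        # Serves the web UI as usual and, additionally, an MCP server for tool calls.
        # The MCP endpoint typically lives under /gradio_api/mcp/ on the same host;
        # treat the exact path as an assumption to verify against the Gradio version in use.
        demo.launch(mcp_server=True)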