prithivMLmods committed · Commit 21ecdbf · verified · 1 Parent(s): 3254c73

update app

Files changed (1):
  1. app.py +7 -7
app.py CHANGED
@@ -113,9 +113,9 @@ pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Fusion",
                        weight_name="溶图.safetensors",
                        adapter_name="fusion")
 
-pipe.load_lora_weights("Alissonerdx/BFS-Best-Face-Swap",
-                       weight_name="bfs_face_v1_qwen_image_edit_2509.safetensors",
-                       adapter_name="faceswap")
+pipe.load_lora_weights("ostris/qwen_image_edit_2509_shirt_design",
+                       weight_name="qwen_image_edit_2509_shirt_design.safetensors",
+                       adapter_name="shirt_design")
 
 try:
     pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
@@ -162,8 +162,8 @@ def infer(
         raise gr.Error("Please upload both images for Fusion/Texture/FaceSwap tasks.")
 
     if not prompt:
-        if lora_adapter == "Face-Swap":
-            prompt = "Swap the face."
+        if lora_adapter == "Cloth-Design-Fuse":
+            prompt = "Put this design on their shirt."
         elif lora_adapter == "Texture Edit":
             prompt = "Apply texture to object."
         elif lora_adapter == "Fuse-Objects":
@@ -172,7 +172,7 @@
     adapters_map = {
         "Texture Edit": "texture",
         "Fuse-Objects": "fusion",
-        "Face-Swap": "faceswap",
+        "Cloth-Design-Fuse": "shirt_design",
     }
 
     active_adapter = adapters_map.get(lora_adapter)
@@ -269,7 +269,7 @@ with gr.Blocks(css=css, theme=steel_blue_theme) as demo:
     examples=[
         ["examples/mug.jpg", "examples/wood.jpg", "Apply wood texture to the mug.", "Texture Edit"],
         ["examples/room.jpg", "examples/chair.jpg", "Put the chair in the room naturally.", "Fuse-Objects"],
-        ["examples/body.jpg", "examples/face.jpg", "Swap the face.", "Face-Swap"],
+        ["examples/body.jpg", "examples/face.jpg", "Put this design on their shirt.", "Cloth-Design-Fuse"],
     ],
     inputs=[image_1, image_2, prompt, lora_adapter],
     outputs=[output_image, seed],
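
Note: the changed lines only show how the new LoRA is loaded and mapped; how active_adapter is actually applied is outside this diff. A minimal sketch of the likely downstream step, assuming app.py uses the standard diffusers PEFT call pipe.set_adapters (an assumption, not visible in the changed hunks):

    # Hypothetical continuation of infer(); the real activation code is not part of this commit.
    active_adapter = adapters_map.get(lora_adapter)
    if active_adapter is not None:
        # Enable exactly one of the loaded LoRA adapters
        # ("texture", "fusion", or the newly added "shirt_design") at full strength.
        pipe.set_adapters([active_adapter], adapter_weights=[1.0])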