Spaces:
Paused
Paused
Vladimir Zaigrajew
committed on
Commit
·
4e9a667
1
Parent(s):
a113ff8
Updated with new example
Browse files
- app.py +16 -12
- statue.jpg +3 -0
app.py
CHANGED
|
@@ -29,7 +29,7 @@ try:
|
|
| 29 |
# Load pre-computed vocabulary scores and names
|
| 30 |
vocab_scores = np.load(VOCAB_SCORES_PATH)
|
| 31 |
with open(VOCAB_NAMES_PATH, 'r') as f:
|
| 32 |
-
vocab_names = [line.strip() for line in f.readlines()]
|
| 33 |
|
| 34 |
except FileNotFoundError as e:
|
| 35 |
print(f"ERROR: A required file was not found: {e.filename}")
|
|
@@ -99,6 +99,7 @@ def predict(input_img, top_k, concept, neg_concept, max_strength):
|
|
| 99 |
# --- Part B: Concept Manipulation ---
|
| 100 |
|
| 101 |
# Validate the user-provided concept
|
|
|
|
| 102 |
if concept not in vocab_names:
|
| 103 |
raise gr.Error(f"Concept '{concept}' not found in vocabulary. Please choose a valid concept.")
|
| 104 |
|
|
@@ -113,7 +114,7 @@ def predict(input_img, top_k, concept, neg_concept, max_strength):
|
|
| 113 |
if not neg_concept:
|
| 114 |
neg_concept_prompt = f"a photo without {concept}"
|
| 115 |
else:
|
| 116 |
-
neg_concept_prompt = f"a photo with {neg_concept}"
|
| 117 |
|
| 118 |
pos_concept_prompt = f"a photo with {concept}"
|
| 119 |
|
|
@@ -187,6 +188,19 @@ with gr.Blocks(theme=gr.themes.Soft(), title="Matryoshka Sparse Autoencoder (MSA
|
|
| 187 |
"Upload an image to see its top activating concepts from a sparse autoencoder. Then, choose a concept (from `clip_disect_20k.txt`) to visualize how manipulating its corresponding concept magnitude affects the image representation."
|
| 188 |
)
|
| 189 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 190 |
with gr.Row():
|
| 191 |
with gr.Column(scale=1):
|
| 192 |
# Input controls
|
|
@@ -205,16 +219,6 @@ with gr.Blocks(theme=gr.themes.Soft(), title="Matryoshka Sparse Autoencoder (MSA
|
|
| 205 |
output_bar_plot = gr.Plot(label="Top Activating Concepts")
|
| 206 |
output_line_plot = gr.Plot(label="Concept Manipulation Analysis")
|
| 207 |
|
| 208 |
-
gr.Examples(
|
| 209 |
-
examples=[
|
| 210 |
-
["bird.jpg", 10, "birds", "", 10.0],
|
| 211 |
-
],
|
| 212 |
-
inputs=[image_input, top_k_slider, concept_input, neg_concept_input, max_strength_slider],
|
| 213 |
-
outputs=[output_image, output_bar_plot, output_line_plot],
|
| 214 |
-
fn=predict,
|
| 215 |
-
cache_examples=True # Set to True for faster loading on HF Spaces
|
| 216 |
-
)
|
| 217 |
-
|
| 218 |
# Wire up the button to the function
|
| 219 |
submit_btn.click(
|
| 220 |
fn=predict,
|
|
|
|
| 29 |
# Load pre-computed vocabulary scores and names
|
| 30 |
vocab_scores = np.load(VOCAB_SCORES_PATH)
|
| 31 |
with open(VOCAB_NAMES_PATH, 'r') as f:
|
| 32 |
+
vocab_names = [line.strip().lower() for line in f.readlines()]
|
| 33 |
|
| 34 |
except FileNotFoundError as e:
|
| 35 |
print(f"ERROR: A required file was not found: {e.filename}")
|
|
|
|
| 99 |
# --- Part B: Concept Manipulation ---
|
| 100 |
|
| 101 |
# Validate the user-provided concept
|
| 102 |
+
concept = concept.lower().strip()
|
| 103 |
if concept not in vocab_names:
|
| 104 |
raise gr.Error(f"Concept '{concept}' not found in vocabulary. Please choose a valid concept.")
|
| 105 |
|
|
|
|
| 114 |
if not neg_concept:
|
| 115 |
neg_concept_prompt = f"a photo without {concept}"
|
| 116 |
else:
|
| 117 |
+
neg_concept_prompt = f"a photo with {neg_concept.lower().strip()}"
|
| 118 |
|
| 119 |
pos_concept_prompt = f"a photo with {concept}"
|
| 120 |
|
|
|
|
| 188 |
"Upload an image to see its top activating concepts from a sparse autoencoder. Then, choose a concept (from `clip_disect_20k.txt`) to visualize how manipulating its corresponding concept magnitude affects the image representation."
|
| 189 |
)
|
| 190 |
|
| 191 |
+
gr.Examples(
|
| 192 |
+
examples=[
|
| 193 |
+
["./bird.jpg", 10, "birds", "", 10.0],
|
| 194 |
+
["./statue.jpg", 10, "statue", "humans", 10.0],
|
| 195 |
+
],
|
| 196 |
+
# NOTE: You might need to create placeholder images 'bird.jpg' and 'statue.jpg'
|
| 197 |
+
# in your directory for the examples to load correctly.
|
| 198 |
+
inputs=[image_input, top_k_slider, concept_input, neg_concept_input, max_strength_slider],
|
| 199 |
+
outputs=[output_image, output_bar_plot, output_line_plot],
|
| 200 |
+
fn=predict,
|
| 201 |
+
cache_examples=True # Set to True for faster loading on HF Spaces
|
| 202 |
+
)
|
| 203 |
+
|
| 204 |
with gr.Row():
|
| 205 |
with gr.Column(scale=1):
|
| 206 |
# Input controls
|
|
|
|
| 219 |
output_bar_plot = gr.Plot(label="Top Activating Concepts")
|
| 220 |
output_line_plot = gr.Plot(label="Concept Manipulation Analysis")
|
| 221 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 222 |
# Wire up the button to the function
|
| 223 |
submit_btn.click(
|
| 224 |
fn=predict,
|
statue.jpg
ADDED
|
Git LFS Details
|