cpg716 committed on
Commit
b97055c
·
verified ·
1 Parent(s): 21cf6b7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -22
app.py CHANGED
@@ -10,10 +10,10 @@ from io import BytesIO
10
  import base64
11
  import traceback
12
 
13
- # Function to run the Llama 4 text test
14
  def test_text_generation():
15
  results = []
16
- results.append("=== Testing Text Generation ===")
17
 
18
  try:
19
  # Get token from environment
@@ -32,8 +32,8 @@ def test_text_generation():
32
  results.append(f"Error logging in: {e}")
33
  return "\n".join(results)
34
 
35
- # Use a Llama 4 model that you have access to
36
- model_id = "meta-llama/Llama-4-Maverick-17B-128E-Instruct" # This is in your list
37
 
38
  results.append(f"Loading tokenizer from {model_id}...")
39
  from transformers import AutoTokenizer, AutoModelForCausalLM
@@ -76,7 +76,7 @@ def test_text_generation():
76
  # Function to run the Llama 4 Scout image-text test
77
  def test_image_text_generation():
78
  results = []
79
- results.append("=== Testing Image-Text Generation ===")
80
 
81
  try:
82
  # Get token from environment
@@ -95,8 +95,8 @@ def test_image_text_generation():
95
  results.append(f"Error logging in: {e}")
96
  return "\n".join(results)
97
 
98
- # Use the Llama 4 Scout model that you have access to
99
- model_id = "meta-llama/Llama-4-Scout-17B-16E-Instruct" # This is in your list
100
 
101
  results.append(f"Loading processor and model from {model_id}...")
102
 
@@ -187,10 +187,10 @@ def test_image_text_generation():
187
 
188
  return "\n".join(results)
189
 
190
- # Function to list available Llama models
191
- def list_llama_models():
192
  results = []
193
- results.append("=== Listing Available Llama Models ===")
194
 
195
  try:
196
  # Get token from environment
@@ -214,14 +214,15 @@ def list_llama_models():
214
  api = HfApi(token=token)
215
 
216
  results.append("Fetching models from meta-llama organization...")
217
- results.append("Available models:")
218
 
219
  model_count = 0
220
  for model in api.list_models(author="meta-llama"):
221
- results.append(f"- {model.id}")
222
- model_count += 1
 
223
 
224
- results.append(f"Total models found: {model_count}")
225
  results.append("MODEL LISTING SUCCESSFUL!")
226
 
227
  except Exception as e:
@@ -234,17 +235,17 @@ def list_llama_models():
234
  # Create Gradio interface
235
  with gr.Blocks(title="Llama 4 Scout Test") as demo:
236
  gr.Markdown("# Llama 4 Scout Test")
237
- gr.Markdown("This Space tests the connection to Llama 4 and Llama 4 Scout models.")
238
 
239
- with gr.Tab("List Available Models"):
240
  with gr.Row():
241
  with gr.Column():
242
- list_models_button = gr.Button("List Available Llama Models")
243
  with gr.Column():
244
- models_result = gr.Textbox(label="Available Models", lines=20)
245
 
246
  list_models_button.click(
247
- fn=list_llama_models,
248
  inputs=[],
249
  outputs=[models_result]
250
  )
@@ -279,10 +280,10 @@ with gr.Blocks(title="Llama 4 Scout Test") as demo:
279
  gr.Markdown("""
280
  ## About This Test
281
 
282
- This test checks if your Space can connect to and use Llama 4 and Llama 4 Scout models.
283
 
284
- - The **List Available Models** tab shows all models available from meta-llama
285
- - The **Text Generation Test** uses Llama 4 Maverick for basic text generation
286
  - The **Image-Text Generation Test** uses Llama 4 Scout for image-text generation
287
 
288
  If both tests pass, your Llama 4 Scout setup should work correctly.
 
10
  import base64
11
  import traceback
12
 
13
+ # Function to run the Llama 4 Scout text test
14
  def test_text_generation():
15
  results = []
16
+ results.append("=== Testing Llama 4 Scout Text Generation ===")
17
 
18
  try:
19
  # Get token from environment
 
32
  results.append(f"Error logging in: {e}")
33
  return "\n".join(results)
34
 
35
+ # Use Llama 4 Scout model
36
+ model_id = "meta-llama/Llama-4-Scout-17B-16E-Instruct"
37
 
38
  results.append(f"Loading tokenizer from {model_id}...")
39
  from transformers import AutoTokenizer, AutoModelForCausalLM
 
76
  # Function to run the Llama 4 Scout image-text test
77
  def test_image_text_generation():
78
  results = []
79
+ results.append("=== Testing Llama 4 Scout Image-Text Generation ===")
80
 
81
  try:
82
  # Get token from environment
 
95
  results.append(f"Error logging in: {e}")
96
  return "\n".join(results)
97
 
98
+ # Use the Llama 4 Scout model
99
+ model_id = "meta-llama/Llama-4-Scout-17B-16E-Instruct"
100
 
101
  results.append(f"Loading processor and model from {model_id}...")
102
 
 
187
 
188
  return "\n".join(results)
189
 
190
+ # Function to list available Llama Scout models
191
+ def list_scout_models():
192
  results = []
193
+ results.append("=== Listing Available Llama 4 Scout Models ===")
194
 
195
  try:
196
  # Get token from environment
 
214
  api = HfApi(token=token)
215
 
216
  results.append("Fetching models from meta-llama organization...")
217
+ results.append("Available Scout models:")
218
 
219
  model_count = 0
220
  for model in api.list_models(author="meta-llama"):
221
+ if "Scout" in model.id: # Only show Scout models
222
+ results.append(f"- {model.id}")
223
+ model_count += 1
224
 
225
+ results.append(f"Total Scout models found: {model_count}")
226
  results.append("MODEL LISTING SUCCESSFUL!")
227
 
228
  except Exception as e:
 
235
  # Create Gradio interface
236
  with gr.Blocks(title="Llama 4 Scout Test") as demo:
237
  gr.Markdown("# Llama 4 Scout Test")
238
+ gr.Markdown("This Space tests the connection to Llama 4 Scout models.")
239
 
240
+ with gr.Tab("List Scout Models"):
241
  with gr.Row():
242
  with gr.Column():
243
+ list_models_button = gr.Button("List Llama 4 Scout Models")
244
  with gr.Column():
245
+ models_result = gr.Textbox(label="Available Scout Models", lines=20)
246
 
247
  list_models_button.click(
248
+ fn=list_scout_models,
249
  inputs=[],
250
  outputs=[models_result]
251
  )
 
280
  gr.Markdown("""
281
  ## About This Test
282
 
283
+ This test checks if your Space can connect to and use Llama 4 Scout models.
284
 
285
+ - The **List Scout Models** tab shows all Scout models available from meta-llama
286
+ - The **Text Generation Test** uses Llama 4 Scout for basic text generation
287
  - The **Image-Text Generation Test** uses Llama 4 Scout for image-text generation
288
 
289
  If both tests pass, your Llama 4 Scout setup should work correctly.