Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
Commit
·
d6c509d
1
Parent(s):
b3df870
Fix chart loading issue: lazily load each tab's charts via Tab.select handlers
Browse files
app.py
CHANGED
|
@@ -19,49 +19,41 @@ from tabs.data_exploration import create_exploration_tab, filter_and_display
|
|
| 19 |
|
| 20 |
def create_app():
|
| 21 |
df = load_data()
|
| 22 |
-
|
| 23 |
MODELS = [x.strip() for x in df["Model"].unique().tolist()]
|
| 24 |
|
| 25 |
with gr.Blocks(
|
| 26 |
theme=gr.themes.Soft(font=[gr.themes.GoogleFont("sans-serif")])
|
| 27 |
) as app:
|
| 28 |
with gr.Tabs() as tabs:
|
| 29 |
-
|
| 30 |
-
with gr.Tab("Leaderboard"):
|
| 31 |
lb_output, lb_plot1, lb_plot2 = create_leaderboard_tab(
|
| 32 |
df, CATEGORIES, METHODOLOGY, HEADER_CONTENT, CARDS
|
| 33 |
)
|
| 34 |
|
| 35 |
-
with gr.Tab("Model Comparison"):
|
| 36 |
mc_info, mc_plot = create_model_comparison_tab(df, HEADER_CONTENT)
|
| 37 |
|
| 38 |
-
with gr.Tab("Data Exploration"):
|
| 39 |
exp_outputs = create_exploration_tab(df)
|
| 40 |
|
| 41 |
-
# Initial
|
| 42 |
-
|
| 43 |
fn=lambda: filter_leaderboard(
|
| 44 |
df, "All", list(CATEGORIES.keys())[0], "Performance"
|
| 45 |
),
|
| 46 |
outputs=[lb_output, lb_plot1, lb_plot2],
|
| 47 |
)
|
| 48 |
|
| 49 |
-
|
| 50 |
fn=lambda: compare_models(
|
| 51 |
df, [df.sort_values("Model Avg", ascending=False).iloc[0]["Model"]]
|
| 52 |
),
|
| 53 |
outputs=[mc_info, mc_plot],
|
| 54 |
)
|
| 55 |
|
| 56 |
-
|
| 57 |
fn=lambda: filter_and_display(
|
| 58 |
-
MODELS[0],
|
| 59 |
-
DATASETS[0],
|
| 60 |
-
min(SCORES),
|
| 61 |
-
max(SCORES),
|
| 62 |
-
0,
|
| 63 |
-
0,
|
| 64 |
-
0,
|
| 65 |
),
|
| 66 |
outputs=exp_outputs[:-1],
|
| 67 |
)
|
|
|
|
| 19 |
|
| 20 |
def create_app():
|
| 21 |
df = load_data()
|
|
|
|
| 22 |
MODELS = [x.strip() for x in df["Model"].unique().tolist()]
|
| 23 |
|
| 24 |
with gr.Blocks(
|
| 25 |
theme=gr.themes.Soft(font=[gr.themes.GoogleFont("sans-serif")])
|
| 26 |
) as app:
|
| 27 |
with gr.Tabs() as tabs:
|
| 28 |
+
with gr.Tab("Leaderboard", id=0) as tab1:
|
|
|
|
| 29 |
lb_output, lb_plot1, lb_plot2 = create_leaderboard_tab(
|
| 30 |
df, CATEGORIES, METHODOLOGY, HEADER_CONTENT, CARDS
|
| 31 |
)
|
| 32 |
|
| 33 |
+
with gr.Tab("Model Comparison", id=1) as tab2:
|
| 34 |
mc_info, mc_plot = create_model_comparison_tab(df, HEADER_CONTENT)
|
| 35 |
|
| 36 |
+
with gr.Tab("Data Exploration", id=2) as tab3:
|
| 37 |
exp_outputs = create_exploration_tab(df)
|
| 38 |
|
| 39 |
+
# Initial data loading
|
| 40 |
+
tab1.select(
|
| 41 |
fn=lambda: filter_leaderboard(
|
| 42 |
df, "All", list(CATEGORIES.keys())[0], "Performance"
|
| 43 |
),
|
| 44 |
outputs=[lb_output, lb_plot1, lb_plot2],
|
| 45 |
)
|
| 46 |
|
| 47 |
+
tab2.select(
|
| 48 |
fn=lambda: compare_models(
|
| 49 |
df, [df.sort_values("Model Avg", ascending=False).iloc[0]["Model"]]
|
| 50 |
),
|
| 51 |
outputs=[mc_info, mc_plot],
|
| 52 |
)
|
| 53 |
|
| 54 |
+
tab3.select(
|
| 55 |
fn=lambda: filter_and_display(
|
| 56 |
+
MODELS[0], DATASETS[0], min(SCORES), max(SCORES), 0, 0, 0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 57 |
),
|
| 58 |
outputs=exp_outputs[:-1],
|
| 59 |
)
|