Peiran committed on
Commit
43656b3
·
1 Parent(s): 591d755

Persist evals to /data CSV and upload per-submission JSONL to dataset repo (peiranli0930/VisEval); add UI feedback

Browse files
Files changed (1) hide show
  1. app.py +65 -46
app.py CHANGED
@@ -1,8 +1,10 @@
1
  import csv
2
  import itertools
 
3
  import os
 
4
  from datetime import datetime
5
- import os
6
  from typing import Dict, List, Tuple
7
 
8
  import gradio as gr
@@ -13,6 +15,8 @@ except Exception: # optional dependency at runtime
13
 
14
 
15
  BASE_DIR = os.path.dirname(__file__)
 
 
16
  TASK_CONFIG = {
17
  "Scene Composition & Object Insertion": {
18
  "folder": "scene_composition_and_object_insertion",
@@ -100,11 +104,31 @@ def _format_pair_header(_pair: Dict[str, str]) -> str:
100
  return ""
101
 
102
 
103
- def _append_evaluation(task_name: str, pair: Dict[str, str], scores: Dict[str, int]) -> None:
104
- csv_path = _csv_path_for_task(task_name, "evaluation_results.csv")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105
  os.makedirs(os.path.dirname(csv_path), exist_ok=True)
106
  csv_exists = os.path.exists(csv_path)
107
-
108
  fieldnames = [
109
  "eval_date",
110
  "test_id",
@@ -115,64 +139,55 @@ def _append_evaluation(task_name: str, pair: Dict[str, str], scores: Dict[str, i
115
  "model2_res",
116
  "model1_path",
117
  "model2_path",
118
- # Per-image scores for Model A (输出A)
119
  "model1_physical_interaction_fidelity_score",
120
  "model1_optical_effect_accuracy_score",
121
  "model1_semantic_functional_alignment_score",
122
  "model1_overall_photorealism_score",
123
- # Per-image scores for Model B (输出B)
124
  "model2_physical_interaction_fidelity_score",
125
  "model2_optical_effect_accuracy_score",
126
  "model2_semantic_functional_alignment_score",
127
  "model2_overall_photorealism_score",
128
  ]
 
 
 
 
 
 
 
 
 
 
129
 
130
- with open(csv_path, "a", newline="", encoding="utf-8") as csv_file:
131
- writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
132
- if not csv_exists:
133
- writer.writeheader()
134
-
135
- row = {
136
- "eval_date": datetime.utcnow().isoformat(),
137
- "test_id": pair["test_id"],
138
- "model1_name": pair["model1_name"],
139
- "model2_name": pair["model2_name"],
140
- "org_img": pair["org_img"],
141
- "model1_res": pair["model1_res"],
142
- "model2_res": pair["model2_res"],
143
- "model1_path": pair["model1_path"],
144
- "model2_path": pair["model2_path"],
145
- }
146
- row.update(scores)
147
- writer.writerow(row)
148
-
149
- # Optionally push updated CSV to the Space repo if credentials are available
150
- _try_push_to_hub(csv_path)
151
-
152
-
153
- def _try_push_to_hub(csv_path: str) -> None:
154
- """Attempt to commit the CSV to the current Space repo if HF_TOKEN and SPACE_ID exist.
155
- Safe no-op if huggingface_hub isn't available or env vars are missing.
156
  """
157
  if HfApi is None:
158
- return
159
  token = os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACEHUB_API_TOKEN")
160
- space_id = os.environ.get("SPACE_ID")
161
- if not token or not space_id:
162
- return
163
  try:
 
 
164
  api = HfApi(token=token)
165
- rel_path = os.path.relpath(csv_path, BASE_DIR).replace(os.sep, "/")
166
- api.upload_file(
167
- path_or_fileobj=csv_path,
168
- path_in_repo=rel_path,
169
- repo_id=space_id,
170
- repo_type="space",
171
- commit_message=f"Update eval CSV: {rel_path} at {datetime.utcnow().isoformat()}",
 
 
 
 
172
  )
 
173
  except Exception:
174
- # Silently ignore push errors to avoid breaking the UI flow
175
- pass
176
 
177
 
178
  def on_task_change(task_name: str, _state_pairs: List[Dict[str, str]]):
@@ -243,10 +258,14 @@ def on_submit(
243
  "model2_semantic_functional_alignment_score": int(b_semantic_score),
244
  "model2_overall_photorealism_score": int(b_overall_score),
245
  }
246
- _append_evaluation(task_name, pair, score_map)
 
 
247
 
248
  next_index = min(index + 1, len(pairs) - 1)
249
  info = f"已保存 Test ID {pair['test_id']} 的评价结果。"
 
 
250
 
251
  if next_index != index:
252
  pair = pairs[next_index]
 
1
  import csv
2
  import itertools
3
+ import json
4
  import os
5
+ import uuid
6
  from datetime import datetime
7
+ from io import BytesIO
8
  from typing import Dict, List, Tuple
9
 
10
  import gradio as gr
 
15
 
16
 
17
  BASE_DIR = os.path.dirname(__file__)
18
+ # Persistent local storage inside HF Spaces
19
+ PERSIST_DIR = os.environ.get("PERSIST_DIR", "/data")
20
  TASK_CONFIG = {
21
  "Scene Composition & Object Insertion": {
22
  "folder": "scene_composition_and_object_insertion",
 
104
  return ""
105
 
106
 
107
+ def _build_eval_row(pair: Dict[str, str], scores: Dict[str, int]) -> Dict[str, object]:
108
+ row = {
109
+ "eval_date": datetime.utcnow().isoformat(),
110
+ "test_id": pair["test_id"],
111
+ "model1_name": pair["model1_name"],
112
+ "model2_name": pair["model2_name"],
113
+ "org_img": pair["org_img"],
114
+ "model1_res": pair["model1_res"],
115
+ "model2_res": pair["model2_res"],
116
+ "model1_path": pair["model1_path"],
117
+ "model2_path": pair["model2_path"],
118
+ }
119
+ row.update(scores)
120
+ return row
121
+
122
+
123
def _local_persist_csv_path(task_name: str) -> str:
    """Return the per-task results CSV path under the persistent PERSIST_DIR."""
    return os.path.join(PERSIST_DIR, TASK_CONFIG[task_name]["folder"], "evaluation_results.csv")
126
+
127
+
128
def _append_local_persist_csv(task_name: str, row: Dict[str, object]) -> bool:
    """Append one evaluation row to the persistent CSV for *task_name*.

    Creates the parent directory if needed and writes the header line on the
    first write. Returns True on success, False when the write failed —
    persistence is best-effort and must never break the submission flow.
    """
    csv_path = _local_persist_csv_path(task_name)
    os.makedirs(os.path.dirname(csv_path), exist_ok=True)
    write_header = not os.path.exists(csv_path)

    # Fixed column order so rows from every session line up in one file.
    fieldnames = [
        "eval_date",
        "test_id",
        "model1_name",
        "model2_name",
        "org_img",
        "model1_res",
        "model2_res",
        "model1_path",
        "model2_path",
        "model1_physical_interaction_fidelity_score",
        "model1_optical_effect_accuracy_score",
        "model1_semantic_functional_alignment_score",
        "model1_overall_photorealism_score",
        "model2_physical_interaction_fidelity_score",
        "model2_optical_effect_accuracy_score",
        "model2_semantic_functional_alignment_score",
        "model2_overall_photorealism_score",
    ]
    try:
        with open(csv_path, "a", newline="", encoding="utf-8") as handle:
            writer = csv.DictWriter(handle, fieldnames=fieldnames)
            if write_header:
                writer.writeheader()
            writer.writerow(row)
            return True
    except Exception:
        # Best-effort: a disk error here should not surface to the UI.
        return False
160
+
161
 
162
def _upload_eval_record_to_dataset(task_name: str, row: Dict[str, object]) -> bool:
    """Push one evaluation as a JSONL file to a Hugging Face dataset repo.

    The target repo comes from the EVAL_REPO_ID env var and defaults to
    'peiranli0930/VisEval'. Each submission is committed as its own file
    under submissions/<task>/<date>/<uuid>.jsonl, so concurrent raters
    never write to the same path. Returns True on success; False when the
    hub client or credentials are missing, or the upload fails — this is
    best-effort and must not break the UI flow.
    """
    if HfApi is None:  # huggingface_hub is an optional dependency
        return False

    token = os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACEHUB_API_TOKEN")
    repo_id = os.environ.get("EVAL_REPO_ID", "peiranli0930/VisEval")
    if not token or not repo_id:
        return False

    try:
        from huggingface_hub import CommitOperationAdd

        client = HfApi(token=token)
        task_folder = TASK_CONFIG[task_name]["folder"]
        record_id = str(uuid.uuid4())
        repo_path = "submissions/{}/{}/{}.jsonl".format(
            task_folder, datetime.utcnow().strftime("%Y-%m-%d"), record_id
        )
        # One JSON object per file; newline keeps it valid JSONL for concatenation.
        body = (json.dumps(row, ensure_ascii=False) + "\n").encode("utf-8")
        client.create_commit(
            repo_id=repo_id,
            repo_type="dataset",
            operations=[
                CommitOperationAdd(path_in_repo=repo_path, path_or_fileobj=BytesIO(body))
            ],
            commit_message=f"Add eval {task_folder} {row.get('test_id')} {record_id}",
        )
        return True
    except Exception:
        # Best-effort: swallow upload errors so the rating flow continues.
        return False
 
191
 
192
 
193
  def on_task_change(task_name: str, _state_pairs: List[Dict[str, str]]):
 
258
  "model2_semantic_functional_alignment_score": int(b_semantic_score),
259
  "model2_overall_photorealism_score": int(b_overall_score),
260
  }
261
+ row = _build_eval_row(pair, score_map)
262
+ ok_local = _append_local_persist_csv(task_name, row)
263
+ ok_hub = _upload_eval_record_to_dataset(task_name, row)
264
 
265
  next_index = min(index + 1, len(pairs) - 1)
266
  info = f"已保存 Test ID {pair['test_id']} 的评价结果。"
267
+ info += " 本地持久化" + ("成功" if ok_local else "失败") + "。"
268
+ info += " 上传Hub" + ("成功" if ok_hub else "失败") + "。"
269
 
270
  if next_index != index:
271
  pair = pairs[next_index]