import pandas as pd
import gradio as gr
import csv
import json
import os
import requests
import io
import shutil
from huggingface_hub import Repository

HF_TOKEN = os.environ.get("HF_TOKEN")

TASKS = ["Classification", "VQA", "Retrieval", "Grounding"]

MODEL_INFO = [
    "Rank", "Models", "Model Size(B)", "Data Source",
    "Overall",
    "Classification", "VQA", "Retrieval", "Grounding"
]

BASE_COLS = [col for col in MODEL_INFO if col not in TASKS]

# One dtype per entry in COLUMN_NAMES (9 columns), as expected by gr.Dataframe.
DATA_TITLE_TYPE = ['number', 'markdown', 'str', 'markdown', 'number', 'number', 'number', 'number', 'number']

SUBMISSION_NAME = "MMEB"
SUBMISSION_URL = "https://huggingface.co/spaces/TIGER-Lab/" + SUBMISSION_NAME
FILE_NAME = "results.csv"
CSV_DIR = "results.csv"
COLUMN_NAMES = MODEL_INFO

LEADERBOARD_INTRODUCTION = """
# MMEB Leaderboard

## Introduction
We introduce a novel benchmark, MMEB (Massive Multimodal Embedding Benchmark),
which includes 36 datasets spanning four meta-task categories: classification, visual question answering,
retrieval, and visual grounding. MMEB provides a comprehensive framework for training
and evaluating embedding models across various combinations of text and image modalities.
All tasks are reformulated as ranking tasks, where the model follows instructions, processes a query,
and selects the correct target from a set of candidates. The query and target can be an image, text,
or a combination of both. MMEB is divided into 20 in-distribution datasets, which can be used for
training, and 16 out-of-distribution datasets, reserved for evaluation.

A detailed explanation of the benchmark and datasets can be found in our paper: https://doi.org/10.48550/arXiv.2410.05160. \n
GitHub link: https://github.com/TIGER-AI-Lab/VLM2Vec. \n
Overview: https://tiger-ai-lab.github.io/VLM2Vec/. \n
"""

TABLE_INTRODUCTION = """"""

LEADERBOARD_INFO = """
## Dataset Summary
MMEB is organized into four primary meta-task categories:
- **Classification**: This category comprises 5 in-distribution and 5 out-of-distribution datasets. Queries
consist of an instruction and an image, optionally accompanied by related text. Targets are class labels,
and the number of candidate labels equals the number of classes in the dataset. \n
    - IND: ImageNet-1k, N24News, HatefulMemes, VOC2007, SUN397 \n
    - OOD: Place365, ImageNet-A, ImageNet-R, ObjectNet, Country-211 \n
- **Visual Question Answering**: This category includes 6 in-distribution and 4 out-of-distribution
datasets. The query consists of an instruction, an image, and a piece of text as the question, while
the target is the answer. Each query has 1,000 target candidates: 1 ground truth and 999 distractors. \n
    - IND: OK-VQA, A-OKVQA, DocVQA, InfographicVQA, ChartQA, Visual7W \n
    - OOD: ScienceQA, VizWiz, GQA, TextVQA \n
- **Information Retrieval**: This category contains 8 in-distribution and 4 out-of-distribution datasets.
Both the query and target sides can involve a combination of text, images, and instructions. As in
the VQA tasks, each query has 1,000 candidates: 1 ground truth and 999 distractors. \n
    - IND: VisDial, CIRR, VisualNews_t2i, VisualNews_i2t, MSCOCO_t2i, MSCOCO_i2t, NIGHTS, WebQA \n
    - OOD: OVEN, FashionIQ, EDIS, Wiki-SS-NQ \n
- **Visual Grounding**: This category includes 1 in-distribution and 3 out-of-distribution datasets, adapted from object detection tasks. Queries consist of an instruction, an image, and text referring to a specific region or object within the image. The target may be a cropped image of the object or text describing the same region. Each query has 1,000 candidates: 1 ground truth and 999 distractors, which may include hard negatives from the same object class, other objects in the same image, or random objects from other images. \n
    - IND: MSCOCO \n
    - OOD: Visual7W-Pointing, RefCOCO, RefCOCO-Matching \n
"""

CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""@article{jiang2024vlm2vec,
  title={VLM2Vec: Training Vision-Language Models for Massive Multimodal Embedding Tasks},
  author={Jiang, Ziyan and Meng, Rui and Yang, Xinyi and Yavuz, Semih and Zhou, Yingbo and Chen, Wenhu},
  journal={arXiv preprint arXiv:2410.05160},
  year={2024}
}"""

SUBMIT_INTRODUCTION = """# Submitting to the MMEB Leaderboard

## ⚠ Please note that you need to submit a JSON file with the following format (the `URL` field is optional):
```json
[
    {
        "Model": "<Model Name>",
        "URL": "<Model URL>",
        "Model Size(B)": 1000,
        "Data Source": "Self-Reported",
        "Overall": 50.0,
        "Classification": 50.0,
        "VQA": 50.0,
        "Retrieval": 50.0,
        "Grounding": 50.0
    }
]
```
You may refer to the GitHub page for instructions on evaluating your model.
GitHub link: https://github.com/TIGER-AI-Lab/VLM2Vec. \n
Please send us an email at [email protected], attaching the JSON file. We will review your submission and update the leaderboard accordingly.
"""

def create_hyperlinked_names(df):
    # Wrap each model name in an <a> tag when a URL is available.
    def convert_url(url, model_name):
        return f'<a href="{url}">{model_name}</a>' if pd.notna(url) else model_name

    def add_link_to_model_name(row):
        row['Models'] = convert_url(row['URL'], row['Models'])
        return row

    df = df.copy()
    df = df.apply(add_link_to_model_name, axis=1)  # apply row-wise, not column-wise
    return df
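
# Example: a row with URL "https://example.com" and Models "MyModel" becomes
# '<a href="https://example.com">MyModel</a>'; rows with a missing URL keep the plain name.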

def fetch_data(file: str) -> pd.DataFrame:
    # Fetch the leaderboard data from the remote Space repository.
    if file is None:
        raise ValueError("File name not provided")
    url = f"https://huggingface.co/spaces/TIGER-Lab/MMEB/resolve/main/{file}"
    print(f"Fetching data from {url}")
    response = requests.get(url)
    if response.status_code != 200:
        raise requests.HTTPError(f"Failed to fetch data: HTTP status code {response.status_code}")
    return pd.read_csv(io.StringIO(response.text))
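
# Example: fetch_data(FILE_NAME) downloads and parses the leaderboard CSV;
# a missing file name or a non-200 response raises immediately.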

def get_urls(file: str = 'urls.csv') -> dict:
    urls = fetch_data(file)
    return dict(zip(urls['Models'], urls['URL']))

def get_df(file=FILE_NAME):
    # fetch_data parses CSV, so fetch the CSV results file and keep a local JSONL copy.
    df = fetch_data(file)
    df.to_json("results.jsonl", orient='records', lines=True)
    df['Model Size(B)'] = df['Model Size(B)'].apply(process_model_size)
    df = df.sort_values(by=['Overall'], ascending=False)
    df = create_hyperlinked_names(df)
    df['Rank'] = range(1, len(df) + 1)
    return df

def refresh_data():
    df = get_df()
    return df[COLUMN_NAMES]

def search_and_filter_models(df, query, min_size, max_size):
    filtered_df = df.copy()
    if query:
        filtered_df = filtered_df[filtered_df['Models'].str.contains(query, case=False, na=False)]
    # Rows with an unknown size are kept only when the full default range (up to 1000B) is selected.
    size_mask = filtered_df['Model Size(B)'].apply(
        lambda x: (min_size <= 1000.0 <= max_size) if x == 'unknown'
        else (min_size <= x <= max_size))
    filtered_df = filtered_df[size_mask]
    return filtered_df[COLUMN_NAMES]
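
# Example: search_and_filter_models(df, "vlm2vec", 0.0, 20.0) keeps models whose
# name contains "vlm2vec" (case-insensitive) and whose size lies between 0B and 20B.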

def search_models(df, query):
    if query:
        return df[df['Models'].str.contains(query, case=False, na=False)]
    return df

def get_size_range(df):
    sizes = df['Model Size(B)'].apply(lambda x: 0.0 if x == 'unknown' else x)
    if (sizes == 0.0).all():
        return 0.0, 1000.0
    return float(sizes.min()), float(sizes.max())

def process_model_size(size):
    if pd.isna(size) or size == 'unk':
        return 'unknown'
    try:
        return float(size)
    except (ValueError, TypeError):
        return 'unknown'
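
# Examples: process_model_size("4.2") -> 4.2, process_model_size("unk") -> 'unknown',
# process_model_size(None) -> 'unknown'.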

def filter_columns_by_tasks(df, selected_tasks=None):
    if selected_tasks is None or len(selected_tasks) == 0:
        return df[COLUMN_NAMES]
    base_columns = ['Models', 'Model Size(B)', 'Data Source', 'Overall']
    selected_columns = base_columns + selected_tasks
    available_columns = [col for col in selected_columns if col in df.columns]
    return df[available_columns]

def get_task_choices():
    return TASKS
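
# A minimal sketch (not part of the original file) of how these helpers could be wired
# into a Gradio UI. The layout and widget names are assumptions; gr.Blocks, gr.Markdown,
# gr.Textbox, and gr.Dataframe are standard Gradio components.
def build_demo():
    df = get_df()
    with gr.Blocks() as demo:
        gr.Markdown(LEADERBOARD_INTRODUCTION)
        search_box = gr.Textbox(label="Search models")
        table = gr.Dataframe(
            value=df[COLUMN_NAMES],
            headers=COLUMN_NAMES,
            datatype=DATA_TITLE_TYPE,
            interactive=False,
        )
        # Re-filter the table whenever the search query changes.
        search_box.change(
            fn=lambda q: search_models(df, q)[COLUMN_NAMES],
            inputs=search_box,
            outputs=table,
        )
    return demo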