# Aiice / Aiice.py
# Hugging Face dataset loading script (repo ITMO-NSS/Aiice).
# Last update by juljuly, commit 46b629b (verified).
import datasets
import numpy as np
import glob
import os
from datetime import datetime
from huggingface_hub import snapshot_download
import tempfile
_DESCRIPTION = """\
Dataset for Arctic sea ice concentration spatio-temporal forecasting task.
"""
_CITATION = """\
@misc{borisova2025aiice,
author = {Julia Borisova},
title = {Aiice: sea ice concentration forecasting benchmark for AI models},
year = {2025},
publisher = {Hugging Face},
howpublished = {\\url{https://huggingface.co/datasets/ITMO-NSS/Aiice}}
}
"""
_HOMEPAGE = "https://huggingface.co/datasets/ITMO-NSS/Aiice"
# Module-level logger routed through the `datasets` library's logging utilities.
logger = datasets.logging.get_logger(__name__)
class Aiice(datasets.GeneratorBasedBuilder):
    """Sea ice concentration forecasting benchmark dataset.

    Serves daily Arctic sea ice concentration grids stored as ``.npy`` files
    named like ``osisaf_YYYYMMDD.npy`` under a ``global_series`` directory.
    Examples are split chronologically:

    * train:      1979-01-01 .. 2015-12-31
    * validation: 2016-01-01 .. 2020-12-31
    * test:       2021-01-01 .. 2025-12-31
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return dataset metadata: per-example features, homepage, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "date": datasets.Value("string"),  # ISO format date "YYYY-MM-DD"
                "matrix": datasets.Array2D(shape=(432, 432), dtype="float32"),
                "filename": datasets.Value("string"),
            }),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the data and define the train/validation/test splits.

        Args:
            dl_manager: the ``datasets`` download manager for this build.

        Returns:
            A list of three ``SplitGenerator``s, each passing the resolved
            data directory and an inclusive ISO date range to
            ``_generate_examples``.
        """
        repo_id = "ITMO-NSS/Aiice"
        # Primary path: fetch and unpack the prepackaged zip archive.
        data_dir = dl_manager.download_and_extract(
            f"https://huggingface.co/datasets/{repo_id}/resolve/main/global_series.zip"
        )
        # Fallback: pull the raw .npy files from the repo if the extracted
        # path is missing. NOTE(review): download_and_extract normally raises
        # on failure rather than returning a dangling path, so this branch is
        # defensive only.
        if not os.path.exists(data_dir):
            logger.info("Downloading dataset files...")
            data_dir = snapshot_download(
                repo_id=repo_id,
                repo_type="dataset",
                # Bug fix: DownloadManager has no `download_cache_dir`
                # attribute; the cache directory is exposed through its
                # DownloadConfig.
                cache_dir=dl_manager.download_config.cache_dir,
                allow_patterns="global_series/**/*.npy",
            )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_dir": data_dir,
                    "date_range": ("1979-01-01", "2015-12-31"),
                    "split_name": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_dir": data_dir,
                    "date_range": ("2016-01-01", "2020-12-31"),
                    "split_name": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_dir": data_dir,
                    "date_range": ("2021-01-01", "2025-12-31"),
                    "split_name": "test",
                },
            ),
        ]

    def _generate_examples(self, data_dir, date_range, split_name):
        """Yield ``(index, example)`` pairs for one split, ordered by date.

        Args:
            data_dir: root directory containing ``global_series/**/*.npy``.
            date_range: inclusive ``("YYYY-MM-DD", "YYYY-MM-DD")`` bounds.
            split_name: split label, used only for logging.

        Yields:
            ``(idx, {"date", "matrix", "filename"})`` with dates in ISO
            format and matrices as float32 arrays.
        """
        # Recursively collect every .npy file under global_series.
        search_path = os.path.join(data_dir, "global_series", "**", "*.npy")
        npy_files = glob.glob(search_path, recursive=True)
        logger.info(f"Found {len(npy_files)} total files")

        start_date = datetime.strptime(date_range[0], "%Y-%m-%d")
        end_date = datetime.strptime(date_range[1], "%Y-%m-%d")

        # Keep only files whose embedded date falls inside the split's range.
        filtered_files = []
        for file_path in npy_files:
            filename = os.path.basename(file_path)
            # Extract date from filename like 'osisaf_19790101.npy'.
            date_str = filename.replace('.npy', '').replace('osisaf_', '')
            try:
                file_date = datetime.strptime(date_str, "%Y%m%d")
            except ValueError as e:
                # Bug fix: report the actual filename instead of the literal
                # placeholder "(unknown)".
                logger.warning(f"Could not parse date from {filename}: {e}")
                continue
            if start_date <= file_date <= end_date:
                filtered_files.append((file_path, filename, file_date))

        # Chronological order so example indices follow the time series.
        filtered_files.sort(key=lambda x: x[2])
        logger.info(f"After date filtering: {len(filtered_files)} files for {split_name} split")

        for idx, (file_path, filename, file_date) in enumerate(filtered_files):
            try:
                matrix = np.load(file_path).astype(np.float32)
            except Exception as e:
                # Best-effort: skip unreadable/corrupt files rather than
                # aborting the whole split.
                logger.warning(f"Could not load {file_path}: {e}")
                continue
            yield idx, {
                "date": file_date.strftime("%Y-%m-%d"),
                "matrix": matrix,
                "filename": filename,
            }